diff --git a/build-scripts/build_llvm.py b/build-scripts/build_llvm.py index ec6bb39548..92e8d9ab33 100755 --- a/build-scripts/build_llvm.py +++ b/build-scripts/build_llvm.py @@ -108,7 +108,7 @@ def build_llvm(llvm_dir, platform, backends, projects, use_clang=False, extra_fl LLVM_TARGETS_TO_BUILD = [ '-DLLVM_TARGETS_TO_BUILD:STRING="' + ";".join(normal_backends) + '"' if normal_backends - else '-DLLVM_TARGETS_TO_BUILD:STRING="AArch64;ARM;Mips;RISCV;X86"' + else '-DLLVM_TARGETS_TO_BUILD:STRING="AArch64;ARM;Mips;RISCV;LoongArch;X86"' ] # if not on ARC platform, but want to add expeirmental backend ARC as target @@ -238,7 +238,7 @@ def main(): parser.add_argument( "--platform", type=str, - choices=["android", "arc", "darwin", "linux", "windows", "xtensa"], + choices=["android", "arc", "darwin", "linux", "windows", "xtensa", "loongarch"], help="identify current platform", ) parser.add_argument( @@ -254,6 +254,7 @@ def main(): "WebAssembly", "X86", "Xtensa", + "LoongArch", ], default=[], help="identify LLVM supported backends, separate by space, like '--arch ARM Mips X86'", @@ -307,6 +308,11 @@ def main(): "repo_ssh": "git@github.com:espressif/llvm-project.git", "branch": "xtensa_release_17.0.1", }, + "loongarch": { + "repo": "https://github.com/llvm/llvm-project.git", + "repo_ssh": "git@github.com:llvm/llvm-project.git", + "branch": "release/19.x", + }, "default": { "repo": "https://github.com/llvm/llvm-project.git", "repo_ssh": "git@github.com:llvm/llvm-project.git", diff --git a/build-scripts/config_common.cmake b/build-scripts/config_common.cmake index 48c5f7be4b..5121ab98f5 100644 --- a/build-scripts/config_common.cmake +++ b/build-scripts/config_common.cmake @@ -43,6 +43,10 @@ elseif (WAMR_BUILD_TARGET STREQUAL "RISCV32_ILP32F") add_definitions(-DBUILD_TARGET_RISCV32_ILP32F) elseif (WAMR_BUILD_TARGET STREQUAL "RISCV32_ILP32") add_definitions(-DBUILD_TARGET_RISCV32_ILP32) +elseif (WAMR_BUILD_TARGET STREQUAL "LOONGARCH64" OR WAMR_BUILD_TARGET STREQUAL 
"LOONGARCH64_LP64D") + add_definitions(-DBUILD_TARGET_LOONGARCH64_LP64D) +elseif (WAMR_BUILD_TARGET STREQUAL "LOONGARCH64_LP64") + add_definitions(-DBUILD_TARGET_LOONGARCH64_LP64) elseif (WAMR_BUILD_TARGET STREQUAL "ARC") add_definitions(-DBUILD_TARGET_ARC) else () @@ -55,7 +59,8 @@ endif () if (CMAKE_SIZEOF_VOID_P EQUAL 8) if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64" - OR WAMR_BUILD_TARGET MATCHES "AARCH64.*" OR WAMR_BUILD_TARGET MATCHES "RISCV64.*") + OR WAMR_BUILD_TARGET MATCHES "AARCH64.*" OR WAMR_BUILD_TARGET MATCHES "RISCV64.*" + OR WAMR_BUILD_TARGET MATCHES "LOONGARCH64.*") if (NOT WAMR_BUILD_PLATFORM STREQUAL "windows") # Add -fPIC flag if build as 64-bit set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") @@ -317,11 +322,12 @@ else () message (" Wakeup of blocking operations enabled") endif () if (WAMR_BUILD_SIMD EQUAL 1) - if (NOT WAMR_BUILD_TARGET MATCHES "RISCV64.*") + if ((NOT WAMR_BUILD_TARGET MATCHES "RISCV64.*") + AND (NOT WAMR_BUILD_TARGET MATCHES "LOONGARCH64.*")) add_definitions (-DWASM_ENABLE_SIMD=1) message (" SIMD enabled") else () - message (" SIMD disabled due to not supported on target RISCV64") + message (" SIMD disabled due to not supported on target RISCV64 or LOONGARCH64") endif () endif () if (WAMR_BUILD_AOT_STACK_FRAME EQUAL 1) diff --git a/build-scripts/runtime_lib.cmake b/build-scripts/runtime_lib.cmake index c57cfc57af..7a07037efa 100644 --- a/build-scripts/runtime_lib.cmake +++ b/build-scripts/runtime_lib.cmake @@ -31,11 +31,14 @@ endif () # Set WAMR_BUILD_TARGET, currently values supported: # "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]", # "MIPS", "XTENSA", "RISCV64[sub]", "RISCV32[sub]" +# "LOONGARCH64[sub]" if (NOT DEFINED WAMR_BUILD_TARGET) if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm64|aarch64)") set (WAMR_BUILD_TARGET "AARCH64") elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64") set (WAMR_BUILD_TARGET "RISCV64") + elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "loongarch64") + set 
(WAMR_BUILD_TARGET "LOONGARCH64") elseif (CMAKE_SIZEOF_VOID_P EQUAL 8) # Build as X86_64 by default in 64-bit platform set (WAMR_BUILD_TARGET "X86_64") diff --git a/core/config.h b/core/config.h index f08d828d27..83858196cc 100644 --- a/core/config.h +++ b/core/config.h @@ -22,6 +22,8 @@ && !defined(BUILD_TARGET_RISCV32_ILP32D) \ && !defined(BUILD_TARGET_RISCV32_ILP32F) \ && !defined(BUILD_TARGET_RISCV32_ILP32) \ + && !defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + && !defined(BUILD_TARGET_LOONGARCH64_LP64) \ && !defined(BUILD_TARGET_ARC) /* clang-format on */ #if defined(__x86_64__) || defined(__x86_64) @@ -50,6 +52,8 @@ #define BUILD_TARGET_RISCV32_ILP32F #elif defined(__riscv) && (__riscv_xlen == 32) && (__riscv_flen == 64) #define BUILD_TARGET_RISCV32_ILP32D +#elif defined(__loongarch) && (__loongarch_grlen == 64) +#define BUILD_TARGET_LOONGARCH64_LP64D #elif defined(__arc__) #define BUILD_TARGET_ARC #else diff --git a/core/iwasm/aot/aot_intrinsic.c b/core/iwasm/aot/aot_intrinsic.c index 245c7a6515..2ecc3e5776 100644 --- a/core/iwasm/aot/aot_intrinsic.c +++ b/core/iwasm/aot/aot_intrinsic.c @@ -867,6 +867,16 @@ aot_intrinsic_fill_capability_flags(AOTCompContext *comp_ctx) add_i64_common_intrinsics(comp_ctx); } } + else if (!strncmp(comp_ctx->target_arch, "loongarch", 9)) { + add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_I32_CONST); + /* + * Note: Use builtin intrinsics since hardware float operation + * will cause rodata relocation + */ + add_f32_common_intrinsics(comp_ctx); + add_f64_common_intrinsics(comp_ctx); + add_common_float_integer_conversion(comp_ctx); + } else if (!strncmp(comp_ctx->target_arch, "xtensa", 6)) { /* * Note: Use builtin intrinsics since hardware float operation diff --git a/core/iwasm/aot/aot_loader.c b/core/iwasm/aot/aot_loader.c index bde3ee034d..f98a911463 100644 --- a/core/iwasm/aot/aot_loader.c +++ b/core/iwasm/aot/aot_loader.c @@ -273,6 +273,7 @@ GET_U16_FROM_ADDR(const uint8 *p) #define E_MACHINE_ARC_COMPACT2 195 /* Synopsys 
ARCompact V2 */ #define E_MACHINE_XTENSA 94 /* Tensilica Xtensa Architecture */ #define E_MACHINE_RISCV 243 /* RISC-V 32/64 */ +#define E_MACHINE_LOONGARCH 258 /* LoongArch 32/64 */ #define E_MACHINE_WIN_I386 0x14c /* Windows i386 architecture */ #define E_MACHINE_WIN_X86_64 0x8664 /* Windows x86-64 architecture */ @@ -303,7 +304,9 @@ loader_mmap(uint32 size, bool prot_exec, char *error_buf, uint32 error_buf_size) #if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \ || defined(BUILD_TARGET_RISCV64_LP64D) \ - || defined(BUILD_TARGET_RISCV64_LP64) + || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) #if !defined(__APPLE__) && !defined(BH_PLATFORM_LINUX_SGX) /* The mmapped AOT data and code in 64-bit targets had better be in range 0 to 2G, or aot loader may fail to apply some relocations, @@ -421,6 +424,9 @@ get_aot_file_target(AOTTargetInfo *target_info, char *target_buf, case E_MACHINE_RISCV: machine_type = "riscv"; break; + case E_MACHINE_LOONGARCH: + machine_type = "loongarch"; + break; case E_MACHINE_ARC_COMPACT: case E_MACHINE_ARC_COMPACT2: machine_type = "arc"; @@ -3081,6 +3087,23 @@ is_text_section(const char *section_name) return !strcmp(section_name, ".text") || !strcmp(section_name, ".ltext"); } +#define R_LARCH_GOT_PC_HI20 75 +#define R_LARCH_GOT_PC_LO12 76 +#define R_LARCH_GOT64_PC_LO20 77 +#define R_LARCH_GOT64_PC_HI12 78 + +#if defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) +static bool +is_loongarch_got_reloc(uint32 type) +{ + if (type == R_LARCH_GOT_PC_HI20 || type == R_LARCH_GOT_PC_LO12 + || type == R_LARCH_GOT64_PC_LO20 || type == R_LARCH_GOT64_PC_HI12) + return true; + return false; +} +#endif + static bool do_text_relocation(AOTModule *module, AOTRelocationGroup *group, char *error_buf, uint32 error_buf_size) @@ -3289,6 +3312,13 @@ do_text_relocation(AOTModule *module, AOTRelocationGroup *group, "resolve symbol %s 
failed", symbol); goto check_symbol_fail; } +#if defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) + else if (is_loongarch_got_reloc(relocation->relocation_type)) { + symbol_addr = + &get_target_symbol_map(NULL)[symbol_index].symbol_addr; + } +#endif if (symbol != symbol_buf) wasm_runtime_free(symbol); diff --git a/core/iwasm/aot/arch/aot_reloc_loongarch.c b/core/iwasm/aot/arch/aot_reloc_loongarch.c new file mode 100644 index 0000000000..074a6995e9 --- /dev/null +++ b/core/iwasm/aot/arch/aot_reloc_loongarch.c @@ -0,0 +1,336 @@ +/* + * Copyright (C) 2025 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "aot_reloc.h" + +#define R_LARCH_64 2 +#define R_LARCH_B26 66 +#define R_LARCH_PCALA_HI20 71 +#define R_LARCH_PCALA_LO12 72 +#define R_LARCH_PCALA64_LO20 73 +#define R_LARCH_PCALA64_HI12 74 +#define R_LARCH_GOT_PC_HI20 75 +#define R_LARCH_GOT_PC_LO12 76 +#define R_LARCH_GOT64_PC_LO20 77 +#define R_LARCH_GOT64_PC_HI12 78 +#define R_LARCH_CALL36 110 + +static SymbolMap target_sym_map[] = { + /* clang-format off */ + REG_COMMON_SYMBOLS + /* clang-format on */ +}; + +static void +set_error_buf(char *error_buf, uint32 error_buf_size, const char *string) +{ + if (error_buf != NULL) + snprintf(error_buf, error_buf_size, "%s", string); +} + +void +get_current_target(char *target_buf, uint32 target_buf_size) +{ + snprintf(target_buf, target_buf_size, "loongarch"); +} + +uint32 +get_plt_item_size(void) +{ +#if __loongarch_grlen == 64 + /* 4*4 bytes instructions and 8 bytes symbol address */ + return 24; +#else + /* TODO */ + return 0; +#endif +} + +SymbolMap * +get_target_symbol_map(uint32 *sym_num) +{ + if (sym_num != NULL) + *sym_num = sizeof(target_sym_map) / sizeof(SymbolMap); + return target_sym_map; +} + +uint32 +get_plt_table_size() +{ + return get_plt_item_size() * (sizeof(target_sym_map) / sizeof(SymbolMap)); +} + +void +init_plt_table(uint8 *plt) +{ +#if 
__loongarch_grlen == 64 + uint32 i, num = sizeof(target_sym_map) / sizeof(SymbolMap); + + for (i = 0; i < num; i++) { + uint32 *p = (uint32 *)plt; + *p++ = 0x1c00000d; /* pcaddu12i $t1, 0 */ + *p++ = 0x28c041ad; /* ld.d $t1, $t1, 16 */ + *p++ = 0x4c0001a0; /* jr $t1 */ + *p++ = 0x03400000; /* nop */ + /* symbol addr */ + *(uint64 *)p = (uint64)(uintptr_t)target_sym_map[i].symbol_addr; + plt += get_plt_item_size(); + } +#else + /* TODO */ +#endif +} + +typedef struct RelocTypeStrMap { + uint32 reloc_type; + char *reloc_str; +} RelocTypeStrMap; + +#define RELOC_TYPE_MAP(reloc_type) \ + { \ + reloc_type, #reloc_type \ + } + +static RelocTypeStrMap reloc_type_str_maps[] = { + RELOC_TYPE_MAP(R_LARCH_64), RELOC_TYPE_MAP(R_LARCH_B26), + RELOC_TYPE_MAP(R_LARCH_PCALA_HI20), RELOC_TYPE_MAP(R_LARCH_PCALA_LO12), + RELOC_TYPE_MAP(R_LARCH_CALL36), +}; + +static const char * +reloc_type_to_str(uint32 reloc_type) +{ + uint32 i; + + for (i = 0; i < sizeof(reloc_type_str_maps) / sizeof(RelocTypeStrMap); + i++) { + if (reloc_type_str_maps[i].reloc_type == reloc_type) + return reloc_type_str_maps[i].reloc_str; + } + + return "Unknown_Reloc_Type"; +} + +static bool +check_reloc_offset(uint32 target_section_size, uint64 reloc_offset, + uint32 reloc_data_size, char *error_buf, + uint32 error_buf_size) +{ + if (!(reloc_offset < (uint64)target_section_size + && reloc_offset + reloc_data_size <= (uint64)target_section_size)) { + set_error_buf(error_buf, error_buf_size, + "AOT module load failed: invalid relocation offset."); + return false; + } + return true; +} + +#define Page(expr) ((expr) & ~0xFFF) + +/* Calculate the adjusted page delta between dest and PC. 
*/ +uint64 +getLoongArchPageDelta(uint64 dest, uint64 pc, uint32 reloc_type) +{ + uint64 pcalau12i_pc; + switch (reloc_type) { + case R_LARCH_PCALA64_LO20: + case R_LARCH_GOT64_PC_LO20: + pcalau12i_pc = pc - 8; + break; + case R_LARCH_PCALA64_HI12: + case R_LARCH_GOT64_PC_HI12: + pcalau12i_pc = pc - 12; + break; + default: + pcalau12i_pc = pc; + break; + } + uint64 result = Page(dest) - Page(pcalau12i_pc); + if (dest & 0x800) + result += 0x1000 - 0x100000000; + if (result & 0x80000000) + result += 0x100000000; + return result; +} + +/* Extract bits v[begin:end], where range is inclusive. */ +static uint32 +extractBits(uint64 v, uint32 begin, uint32 end) +{ + return begin == 63 ? v >> end : (v & ((1ULL << (begin + 1)) - 1)) >> end; +} + +static uint32 +setD10k16(uint32 insn, uint32 imm) +{ + uint32 immLo = extractBits(imm, 15, 0); + uint32 immHi = extractBits(imm, 25, 16); + return (insn & 0xfc000000) | (immLo << 10) | immHi; +} + +static uint32_t +setJ20(uint32 insn, uint32 imm) +{ + return (insn & 0xfe00001f) | (extractBits(imm, 19, 0) << 5); +} + +static uint32_t +setK12(uint32 insn, uint32 imm) +{ + return (insn & 0xffc003ff) | (extractBits(imm, 11, 0) << 10); +} + +static uint32_t +setK16(uint32_t insn, uint32_t imm) +{ + return (insn & 0xfc0003ff) | (extractBits(imm, 15, 0) << 10); +} + +bool +apply_relocation(AOTModule *module, uint8 *target_section_addr, + uint32 target_section_size, uint64 reloc_offset, + int64 reloc_addend, uint32 reloc_type, void *symbol_addr, + int32 symbol_index, char *error_buf, uint32 error_buf_size) +{ + void *S = symbol_addr; + int64 A = reloc_addend; + uint8 *P = target_section_addr + reloc_offset; + int32 insn = *(int32 *)P; + char buf[128]; + int64 X; + + switch (reloc_type) { + case R_LARCH_64: /* S + A */ + { + int64 val_64 = (int64)((intptr_t)S + (intptr_t)A); + + CHECK_RELOC_OFFSET(sizeof(int64)); + if (val_64 != ((intptr_t)S + (intptr_t)A)) { + goto fail_addr_out_of_range; + } + + bh_memcpy_s(P, sizeof(int64_t), &val_64, 
sizeof(int64_t)); + break; + } + case R_LARCH_B26: + case R_LARCH_CALL36: /* S + A - P */ + { + if (reloc_type == R_LARCH_B26) { + CHECK_RELOC_OFFSET(sizeof(int32)); + } + else if (reloc_type == R_LARCH_CALL36) { + CHECK_RELOC_OFFSET(sizeof(int64)); + } + /* Negative symbol index means the symbol is an AOT function and we + * suppose R_LARCH_{B26,CALL36} is able to address it, so apply the + * relocation with the symbol directly. Otherwise, the symbol is a + * runtime function or a native function whose address is probably + * beyond of R_LARCH_{B26,CALL36}'s addressing range, so apply the + * relocation with PLT. + */ + if (symbol_index >= 0) { + if (reloc_addend > 0) { + set_error_buf( + error_buf, error_buf_size, + "AOT module load failed: relocate to plt table " + "with reloc addend larger than 0 is unsupported."); + return false; + } + S = (uint8 *)module->code + module->code_size + - get_plt_table_size() + get_plt_item_size() * symbol_index; + } + if (reloc_type == R_LARCH_B26) { + X = (int64)S + A - (int64)P; + if (!(X >= (-128 * BH_MB) && X <= (128 * BH_MB - 4))) { + goto fail_addr_out_of_range; + } + if ((X & 3) != 0) { + goto fail_addr_not_4bytes_aligned; + } + *(int32 *)P = setD10k16(insn, X >> 2); + } + else if (reloc_type == R_LARCH_CALL36) { + int32 jirl = *(int32 *)(P + 4); + X = (int64)S + A - (int64)P; + if (!(X >= (-128LL * BH_GB - 0x20000) + && X <= (128LL * BH_GB - 0x20000 - 4))) { + goto fail_addr_out_of_range; + } + if ((X & 3) != 0) { + goto fail_addr_not_4bytes_aligned; + } + uint32 hi20 = extractBits(X + (1 << 17), 37, 18); + uint32 lo16 = extractBits(X, 17, 2); + *(int32 *)P = setJ20(insn, hi20); + *(int32 *)(P + 4) = setK16(jirl, lo16); + } + break; + } + case R_LARCH_PCALA_HI20: + case R_LARCH_GOT_PC_HI20: /* GOT + G has been calculated as symbol_addr + */ + { + CHECK_RELOC_OFFSET(sizeof(int32)); + X = getLoongArchPageDelta((int64)S + A, (int64)P, reloc_type); + /* Note: Like ld and lld, no overflow check. 
*/ + *(int32 *)P = setJ20(insn, extractBits(X, 31, 12)); + break; + } + case R_LARCH_PCALA_LO12: + case R_LARCH_GOT_PC_LO12: /* GOT + G has been calculated as symbol_addr + */ + { + CHECK_RELOC_OFFSET(sizeof(int32)); + *(int32 *)P = setK12(insn, extractBits((int64)S + A, 11, 0)); + break; + } + case R_LARCH_PCALA64_LO20: + case R_LARCH_GOT64_PC_LO20: /* GOT + G has been calculated as + symbol_addr */ + { + CHECK_RELOC_OFFSET(sizeof(int32)); + X = getLoongArchPageDelta((int64)S + A, (int64)P, reloc_type); + *(int32 *)P = setJ20(insn, extractBits(X, 51, 32)); + break; + } + case R_LARCH_PCALA64_HI12: + case R_LARCH_GOT64_PC_HI12: /* GOT + G has been calculated as + symbol_addr */ + { + CHECK_RELOC_OFFSET(sizeof(int32)); + X = getLoongArchPageDelta((int64)S + A, (int64)P, reloc_type); + *(int32 *)P = setK12(insn, extractBits(X, 63, 52)); + break; + } + + default: + if (error_buf != NULL) + snprintf(error_buf, error_buf_size, + "Load relocation section failed: " + "invalid relocation type %" PRIu32 ".", + reloc_type); + return false; + } + + /* clear icache */ + __asm__ volatile("ibar 0"); + + return true; + +fail_addr_out_of_range: + snprintf(buf, sizeof(buf), + "AOT module load failed: " + "relocation truncated to fit %s failed.", + reloc_type_to_str(reloc_type)); + set_error_buf(error_buf, error_buf_size, buf); + return false; + +fail_addr_not_4bytes_aligned: + snprintf(buf, sizeof(buf), + "AOT module load failed: " + "target address is not 4-bytes aligned."); + set_error_buf(error_buf, error_buf_size, buf); + return false; +} diff --git a/core/iwasm/aot/debug/elf.h b/core/iwasm/aot/debug/elf.h index 9bdad6521d..62189af015 100644 --- a/core/iwasm/aot/debug/elf.h +++ b/core/iwasm/aot/debug/elf.h @@ -70,14 +70,15 @@ #define EM_SH 42 /* SuperH */ #define EM_SPARCV9 43 /* SPARC v9 64-bit */ #define EM_H8_300 46 -#define EM_IA_64 50 /* HP/Intel IA-64 */ -#define EM_X86_64 62 /* AMD x86-64 */ -#define EM_S390 22 /* IBM S/390 */ -#define EM_CRIS 76 /* Axis Communications 
32-bit embedded processor */ -#define EM_V850 87 /* NEC v850 */ -#define EM_M32R 88 /* Renesas M32R */ -#define EM_XTENSA 94 /* Tensilica Xtensa */ -#define EM_RISCV 243 /* RISC-V */ +#define EM_IA_64 50 /* HP/Intel IA-64 */ +#define EM_X86_64 62 /* AMD x86-64 */ +#define EM_S390 22 /* IBM S/390 */ +#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ +#define EM_V850 87 /* NEC v850 */ +#define EM_M32R 88 /* Renesas M32R */ +#define EM_XTENSA 94 /* Tensilica Xtensa */ +#define EM_RISCV 243 /* RISC-V */ +#define EM_LOONGARCH 258 /* LoongArch */ #define EM_ALPHA 0x9026 #define EM_CYGNUS_V850 0x9080 #define EM_CYGNUS_M32R 0x9041 diff --git a/core/iwasm/aot/iwasm_aot.cmake b/core/iwasm/aot/iwasm_aot.cmake index efff88dd07..d96c4b5306 100644 --- a/core/iwasm/aot/iwasm_aot.cmake +++ b/core/iwasm/aot/iwasm_aot.cmake @@ -25,6 +25,8 @@ elseif (WAMR_BUILD_TARGET STREQUAL "XTENSA") set (arch_source ${IWASM_AOT_DIR}/arch/aot_reloc_xtensa.c) elseif (WAMR_BUILD_TARGET MATCHES "RISCV*") set (arch_source ${IWASM_AOT_DIR}/arch/aot_reloc_riscv.c) +elseif (WAMR_BUILD_TARGET MATCHES "LOONGARCH*") + set (arch_source ${IWASM_AOT_DIR}/arch/aot_reloc_loongarch.c) elseif (WAMR_BUILD_TARGET STREQUAL "ARC") set (arch_source ${IWASM_AOT_DIR}/arch/aot_reloc_arc.c) else () diff --git a/core/iwasm/common/arch/invokeNative_loongarch.S b/core/iwasm/common/arch/invokeNative_loongarch.S new file mode 100644 index 0000000000..92ffcdaf23 --- /dev/null +++ b/core/iwasm/common/arch/invokeNative_loongarch.S @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2025 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#if defined(__loongarch_soft_float) +#define LA_FPREG_SIZE 0 +#elif defined(__loongarch_single_float) +#define LA_OP_LOADFPREG fld.s +#define LA_OP_STROEFPREG fst.s +#define LA_FPREG_SIZE 4 +#elif defined(__loongarch_double_float) +#define LA_OP_LOADFPREG fld.d +#define LA_OP_STROEFPREG fst.d +#define LA_FPREG_SIZE 8 +#endif + +#if __loongarch_grlen == 32 +#define LA_OP_ADDI addi.w +#define LA_OP_SLLI slli.w +#define LA_OP_SUB sub.w +#define LA_OP_BSTRINS bstrins.w +#define LA_OP_LOADREG ld.w +#define LA_OP_STOREREG st.w +#define LA_REG_SIZE 4 +#define LA_REG_SHIFT 2 +#define LA_FP_OFFSET (8 * LA_REG_SIZE) +#define LA_INT_OFFSET 0 +#else +#define LA_OP_ADDI addi.d +#define LA_OP_SLLI slli.d +#define LA_OP_SUB sub.d +#define LA_OP_BSTRINS bstrins.d +#define LA_OP_LOADREG ld.d +#define LA_OP_STOREREG st.d +#define LA_REG_SIZE 8 +#define LA_REG_SHIFT 3 +#define LA_FP_OFFSET 0 +#define LA_INT_OFFSET (8 * LA_FPREG_SIZE) +#endif + + .text + .align 2 +#ifndef BH_PLATFORM_DARWIN + .globl invokeNative + .type invokeNative, function +invokeNative: +#else + .globl _invokeNative +_invokeNative: +#endif /* end of BH_PLATFORM_DARWIN */ + +/* + * Arguments passed in: + * + * a0 function ptr + * a1 argv + * a2 nstacks + */ + +/* + * $sp (stack pointer) + * |- st.d/st.w to store 64/32-bit values from register to memory + * |- ld.d/ld.w to load from stack to register + * $fp/$s9 (frame pointer) + * $a0-$a7 (8 integer arguments) + * |- st.d/st.w to store + * |- ld.d/ld.w to load + * $fa0-$a7 (8 float arguments) + * |- fst.d/fst.s to store + * |- fld.d/fld.s to load + * $t0-$t8 (temporaries regisgers) + * |- caller saved + */ + + /* reserve space on stack to save return address and frame pointer */ + LA_OP_ADDI $sp, $sp, - 2 * LA_REG_SIZE + LA_OP_STOREREG $fp, $sp, 0 * LA_REG_SIZE /* save frame pointer */ + LA_OP_STOREREG $ra, $sp, 1 * LA_REG_SIZE /* save return address */ + + move $fp, $sp /* set frame pointer to 
bottom of fixed frame */ + + /* save function ptr, argv & nstacks */ + move $t0, $a0 /* $t0 = function ptr */ + move $t1, $a1 /* $t1 = argv array address */ + move $t2, $a2 /* $t2 = nstack */ + +#ifndef __loongarch_soft_float + /* fill in $fa0-7 float-registers*/ + LA_OP_LOADFPREG $fa0, $t1, LA_FP_OFFSET + 0 * LA_FPREG_SIZE /* $fa0 */ + LA_OP_LOADFPREG $fa1, $t1, LA_FP_OFFSET + 1 * LA_FPREG_SIZE /* $fa1 */ + LA_OP_LOADFPREG $fa2, $t1, LA_FP_OFFSET + 2 * LA_FPREG_SIZE /* $fa2 */ + LA_OP_LOADFPREG $fa3, $t1, LA_FP_OFFSET + 3 * LA_FPREG_SIZE /* $fa3 */ + LA_OP_LOADFPREG $fa4, $t1, LA_FP_OFFSET + 4 * LA_FPREG_SIZE /* $fa4 */ + LA_OP_LOADFPREG $fa5, $t1, LA_FP_OFFSET + 5 * LA_FPREG_SIZE /* $fa5 */ + LA_OP_LOADFPREG $fa6, $t1, LA_FP_OFFSET + 6 * LA_FPREG_SIZE /* $fa6 */ + LA_OP_LOADFPREG $fa7, $t1, LA_FP_OFFSET + 7 * LA_FPREG_SIZE /* $fa7 */ +#endif + + /* fill in $a0-7 integer-registers*/ + LA_OP_LOADREG $a0, $t1, LA_INT_OFFSET + 0 * LA_REG_SIZE /* $a0 */ + LA_OP_LOADREG $a1, $t1, LA_INT_OFFSET + 1 * LA_REG_SIZE /* $a1 */ + LA_OP_LOADREG $a2, $t1, LA_INT_OFFSET + 2 * LA_REG_SIZE /* $a2 */ + LA_OP_LOADREG $a3, $t1, LA_INT_OFFSET + 3 * LA_REG_SIZE /* $a3 */ + LA_OP_LOADREG $a4, $t1, LA_INT_OFFSET + 4 * LA_REG_SIZE /* $a4 */ + LA_OP_LOADREG $a5, $t1, LA_INT_OFFSET + 5 * LA_REG_SIZE /* $a5 */ + LA_OP_LOADREG $a6, $t1, LA_INT_OFFSET + 6 * LA_REG_SIZE /* $a6 */ + LA_OP_LOADREG $a7, $t1, LA_INT_OFFSET + 7 * LA_REG_SIZE /* $a7 */ + + /* $t1 points to stack args */ + + /* LA_FPREG_SIZE is zero when __loongarch_soft_float defined */ + LA_OP_ADDI $t1, $t1, LA_REG_SIZE * 8 + LA_FPREG_SIZE * 8 + + /* directly call the function if no args in stack */ + beqz $t2, call_func + + /* reserve enough stack space for function arguments */ + LA_OP_SLLI $t3, $t2, LA_REG_SHIFT /* shift left 3 bits. 
$t3 = n_stacks * 8 */ + LA_OP_SUB $sp, $sp, $t3 + + /* make 16-byte aligned */ + LA_OP_BSTRINS $sp, $zero, 3, 0 + + /* save $sp in t4 register */ + move $t4, $sp + + /* copy left arguments from caller stack to own frame stack */ +loop_stack_args: + beqz $t2, call_func + LA_OP_LOADREG $t5, $t1, 0 /* load stack argument, $t5 = argv[i] */ + LA_OP_STOREREG $t5, $t4, 0 /* store $t5 to reseved stack, $sp[j] = $t5 */ + LA_OP_ADDI $t1, $t1, LA_REG_SIZE /* move to next stack argument */ + LA_OP_ADDI $t4, $t4, LA_REG_SIZE /* move to next stack pointer */ + LA_OP_ADDI $t2, $t2, -1 /* decrease $t2 every loop, nstacks = nstacks -1 */ + b loop_stack_args + +call_func: + jirl $ra, $t0, 0 + + /* restore registers pushed in stack or saved in another register */ +return: + move $sp, $fp /* restore $sp saved in fp before function call */ + LA_OP_LOADREG $fp, $sp, 0 * LA_REG_SIZE /* load previous frame poniter to $fp register */ + LA_OP_LOADREG $ra, $sp, 1 * LA_REG_SIZE /* load previous return address to $ra register */ + LA_OP_ADDI $sp, $sp, 2 * LA_REG_SIZE /* pop frame, restore $sp */ + jr $ra diff --git a/core/iwasm/common/iwasm_common.cmake b/core/iwasm/common/iwasm_common.cmake index 15895b8e5e..ae43896a3b 100644 --- a/core/iwasm/common/iwasm_common.cmake +++ b/core/iwasm/common/iwasm_common.cmake @@ -89,6 +89,8 @@ elseif (WAMR_BUILD_TARGET STREQUAL "XTENSA") set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_xtensa.s) elseif (WAMR_BUILD_TARGET MATCHES "RISCV*") set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_riscv.S) +elseif (WAMR_BUILD_TARGET MATCHES "LOONGARCH*") + set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_loongarch.S) elseif (WAMR_BUILD_TARGET STREQUAL "ARC") set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_arc.s) else () diff --git a/core/iwasm/common/wasm_runtime_common.c b/core/iwasm/common/wasm_runtime_common.c index 5517fe60fc..f675624f63 100644 --- 
a/core/iwasm/common/wasm_runtime_common.c +++ b/core/iwasm/common/wasm_runtime_common.c @@ -5033,7 +5033,7 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr, n_stacks += 2; } break; -#else /* BUILD_TARGET_RISCV32_ILP32D */ +#else /* else of !defined(BUILD_TARGET_RISCV32_ILP32D) */ case VALUE_TYPE_F32: case VALUE_TYPE_F64: if (n_fps < MAX_REG_FLOATS) { @@ -5059,7 +5059,7 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr, n_stacks += 2; } break; -#endif /* BUILD_TARGET_RISCV32_ILP32D */ +#endif /* end of !defined(BUILD_TARGET_RISCV32_ILP32D) */ default: bh_assert(0); break; @@ -5254,7 +5254,7 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr, } break; } -#else /* BUILD_TARGET_RISCV32_ILP32D */ +#else /* else of !defined(BUILD_TARGET_RISCV32_ILP32D) */ case VALUE_TYPE_F32: case VALUE_TYPE_F64: { @@ -5302,7 +5302,7 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr, } break; } -#endif /* BUILD_TARGET_RISCV32_ILP32D */ +#endif /* end of !defined(BUILD_TARGET_RISCV32_ILP32D) */ #if WASM_ENABLE_GC == 0 && WASM_ENABLE_REF_TYPES != 0 case VALUE_TYPE_EXTERNREF: { @@ -5689,7 +5689,9 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr, #if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \ || defined(BUILD_TARGET_AARCH64) || defined(BUILD_TARGET_RISCV64_LP64D) \ - || defined(BUILD_TARGET_RISCV64_LP64) + || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) #if WASM_ENABLE_SIMD != 0 #ifdef v128 @@ -5709,7 +5711,9 @@ typedef union __declspec(intrin_type) __declspec(align(8)) v128 { } v128; #elif defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \ || defined(BUILD_TARGET_RISCV64_LP64D) \ - || defined(BUILD_TARGET_RISCV64_LP64) + || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) typedef long long v128 
__attribute__((__vector_size__(16), __may_alias__, __aligned__(1))); #elif defined(BUILD_TARGET_AARCH64) @@ -5754,13 +5758,17 @@ static V128FuncPtr invokeNative_V128 = (V128FuncPtr)(uintptr_t)invokeNative; #else /* else of defined(_WIN32) || defined(_WIN32_) */ #define MAX_REG_FLOATS 8 #if defined(BUILD_TARGET_AARCH64) || defined(BUILD_TARGET_RISCV64_LP64D) \ - || defined(BUILD_TARGET_RISCV64_LP64) + || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) #define MAX_REG_INTS 8 #else #define MAX_REG_INTS 6 -#endif /* end of defined(BUILD_TARGET_AARCH64) \ - || defined(BUILD_TARGET_RISCV64_LP64D) \ - || defined(BUILD_TARGET_RISCV64_LP64) */ +#endif /* end of defined(BUILD_TARGET_AARCH64) \ + || defined(BUILD_TARGET_RISCV64_LP64D) \ + || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) */ #endif /* end of defined(_WIN32) || defined(_WIN32_) */ /* @@ -5793,17 +5801,19 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr, #if WASM_ENABLE_GC == 0 && WASM_ENABLE_REF_TYPES != 0 bool is_aot_func = (NULL == signature); #endif -#ifndef BUILD_TARGET_RISCV64_LP64 +#if !defined(BUILD_TARGET_RISCV64_LP64) \ + && !defined(BUILD_TARGET_LOONGARCH64_LP64) #if WASM_ENABLE_SIMD == 0 uint64 *fps; #else v128 *fps; #endif -#else /* else of BUILD_TARGET_RISCV64_LP64 */ +#else /* else of BUILD_TARGET_RISCV64_LP64/BUILD_TARGET_LOONGARCH64_LP64 */ #define fps ints -#endif /* end of BUILD_TARGET_RISCV64_LP64 */ +#endif /* end of BUILD_TARGET_RISCV64_LP64/BUILD_TARGET_LOONGARCH64_LP64 */ -#if defined(_WIN32) || defined(_WIN32_) || defined(BUILD_TARGET_RISCV64_LP64) +#if defined(_WIN32) || defined(_WIN32_) || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) /* important difference in calling conventions */ #define n_fps n_ints #else @@ -5824,7 +5834,8 @@ wasm_runtime_invoke_native(WASMExecEnv 
*exec_env, void *func_ptr, } } -#ifndef BUILD_TARGET_RISCV64_LP64 +#if !defined(BUILD_TARGET_RISCV64_LP64) \ + && !defined(BUILD_TARGET_LOONGARCH64_LP64) #if WASM_ENABLE_SIMD == 0 fps = argv1; ints = fps + MAX_REG_FLOATS; @@ -5832,9 +5843,9 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr, fps = (v128 *)argv1; ints = (uint64 *)(fps + MAX_REG_FLOATS); #endif -#else /* else of BUILD_TARGET_RISCV64_LP64 */ +#else /* else of BUILD_TARGET_RISCV64_LP64/BUILD_TARGET_LOONGARCH64_LP64 */ ints = argv1; -#endif /* end of BUILD_TARGET_RISCV64_LP64 */ +#endif /* end of BUILD_TARGET_RISCV64_LP64/BUILD_TARGET_LOONGARCH64_LP64 */ stacks = ints + MAX_REG_INTS; ints[n_ints++] = (uint64)(uintptr_t)exec_env; @@ -6108,11 +6119,13 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr, return ret; } -#endif /* end of defined(BUILD_TARGET_X86_64) \ - || defined(BUILD_TARGET_AMD_64) \ - || defined(BUILD_TARGET_AARCH64) \ - || defined(BUILD_TARGET_RISCV64_LP64D) \ - || defined(BUILD_TARGET_RISCV64_LP64) */ +#endif /* end of defined(BUILD_TARGET_X86_64) \ + || defined(BUILD_TARGET_AMD_64) \ + || defined(BUILD_TARGET_AARCH64) \ + || defined(BUILD_TARGET_RISCV64_LP64D) \ + || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) */ bool wasm_runtime_call_indirect(WASMExecEnv *exec_env, uint32 element_index, diff --git a/core/iwasm/compilation/aot_emit_numberic.c b/core/iwasm/compilation/aot_emit_numberic.c index 1f37060d91..632f3df132 100644 --- a/core/iwasm/compilation/aot_emit_numberic.c +++ b/core/iwasm/compilation/aot_emit_numberic.c @@ -875,6 +875,12 @@ is_target_riscv(AOTCompContext *comp_ctx) return !strncmp(comp_ctx->target_arch, "riscv", 5); } +static bool +is_target_loongarch(AOTCompContext *comp_ctx) +{ + return !strncmp(comp_ctx->target_arch, "loongarch", 9); +} + static bool is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32) { @@ -909,7 +915,7 @@ 
is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32) else ret = (!is_f32 || strstr(feature_string, "-fp")) ? true : false; } - else if (is_target_riscv(comp_ctx)) { + else if (is_target_riscv(comp_ctx) || is_target_loongarch(comp_ctx)) { /* * Note: Use builtin intrinsics since hardware float operation * will cause rodata relocation, this will try to use hardware diff --git a/core/iwasm/compilation/aot_llvm.c b/core/iwasm/compilation/aot_llvm.c index 14ee4dd2b7..b2242626ce 100644 --- a/core/iwasm/compilation/aot_llvm.c +++ b/core/iwasm/compilation/aot_llvm.c @@ -190,6 +190,13 @@ aot_target_precheck_can_use_musttail(const AOTCompContext *comp_ctx) */ return false; } + if (strstr(comp_ctx->target_arch, "loongarch")) { + /* + * cf. + * https://github.com/bytecodealliance/wasm-micro-runtime/issues/2412 + */ + return false; + } /* * x86-64/i386: true * @@ -2205,7 +2212,8 @@ static ArchItem valid_archs[] = { { "thumbv8.1m.main", true }, { "riscv32", true }, { "riscv64", true }, - { "arc", true } + { "arc", true }, + { "loongarch64", true }, }; static const char *valid_abis[] = { @@ -2817,10 +2825,10 @@ aot_create_comp_context(const AOTCompData *comp_data, aot_comp_option_t option) } /* Set default abi for riscv target */ - if (arch && !strncmp(arch, "riscv", 5) && !abi) { - if (!strcmp(arch, "riscv64")) + if (arch && !abi) { + if (!strcmp(arch, "riscv64") || !strcmp(arch, "loongarch64")) abi = "lp64d"; - else + else if (!strcmp(arch, "riscv32")) abi = "ilp32d"; } @@ -2991,8 +2999,9 @@ aot_create_comp_context(const AOTCompData *comp_data, aot_comp_option_t option) goto fail; } - /* Add module flag and cpu feature for riscv target */ - if (arch && !strncmp(arch, "riscv", 5)) { + /* Add module flag and cpu feature for riscv or loongarch target */ + if (arch + && (!strncmp(arch, "riscv", 5) || !strncmp(arch, "loongarch", 9))) { LLVMMetadataRef meta_target_abi; if (!(meta_target_abi = LLVMMDStringInContext2(comp_ctx->context, diff --git 
a/core/shared/platform/common/posix/posix_memmap.c b/core/shared/platform/common/posix/posix_memmap.c index 1d972f5fa3..7dc6c881bf 100644 --- a/core/shared/platform/common/posix/posix_memmap.c +++ b/core/shared/platform/common/posix/posix_memmap.c @@ -90,7 +90,9 @@ os_mmap(void *hint, size_t size, int prot, int flags, os_file_handle file) if (flags & MMAP_MAP_FIXED) map_flags |= MAP_FIXED; -#if defined(BUILD_TARGET_RISCV64_LP64D) || defined(BUILD_TARGET_RISCV64_LP64) +#if defined(BUILD_TARGET_RISCV64_LP64D) || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) /* As AOT relocation in RISCV64 may require that the code/data mapped * is in range 0 to 2GB, we try to map the memory with hint address * (mmap's first argument) to meet the requirement. @@ -134,7 +136,9 @@ os_mmap(void *hint, size_t size, int prot, int flags, os_file_handle file) hint_addr += BH_MB; } } -#endif /* end of BUILD_TARGET_RISCV64_LP64D || BUILD_TARGET_RISCV64_LP64 */ +#endif /* end of BUILD_TARGET_RISCV64_LP64D || BUILD_TARGET_RISCV64_LP64 \ + || BUILD_TARGET_LOONGARCH64_LP64D \ + || BUILD_TARGET_LOONGARCH64_LP64 */ /* memory hasn't been mapped or was mapped failed previously */ if (addr == MAP_FAILED) { diff --git a/core/shared/platform/linux/platform_internal.h b/core/shared/platform/linux/platform_internal.h index 865180273e..187128e309 100644 --- a/core/shared/platform/linux/platform_internal.h +++ b/core/shared/platform/linux/platform_internal.h @@ -83,7 +83,9 @@ typedef sem_t korp_sem; #if WASM_DISABLE_HW_BOUND_CHECK == 0 #if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \ || defined(BUILD_TARGET_AARCH64) || defined(BUILD_TARGET_RISCV64_LP64D) \ - || defined(BUILD_TARGET_RISCV64_LP64) + || defined(BUILD_TARGET_RISCV64_LP64) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64D) \ + || defined(BUILD_TARGET_LOONGARCH64_LP64) #include @@ -111,7 +113,7 @@ os_signal_unmask(); void os_sigreturn(); -#endif /* end 
of BUILD_TARGET_X86_64/AMD_64/AARCH64/RISCV64 */ +#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64/RISCV64/LOONGARCH64 */ #endif /* end of WASM_DISABLE_HW_BOUND_CHECK */ #define os_getpagesize getpagesize diff --git a/doc/loongarch.md b/doc/loongarch.md new file mode 100644 index 0000000000..7b9a3bd8af --- /dev/null +++ b/doc/loongarch.md @@ -0,0 +1,34 @@ +# Introduction + +This document describes how to cross build `iwasm` on x86-64 Ubuntu 24.04 and run it with `qemu-loongarch64` without depending on a real LoongArch hardware. + +## Download LoongArch cross toolchain + +```bash +wget https://github.com/loongson/build-tools/releases/download/untagged-afda1c2ad38028517e0e/x86_64-cross-tools-loongarch64-binutils_2.42-gcc_14.1.0-glibc_2.39.tar.xz +# extract it anywhere +``` + +## Build iwasm + +```bash +cd product-mini/platforms/linux +mkdir buildla && cd buildla +cmake .. \ + -DWAMR_BUILD_TARGET=LOONGARCH64 \ + -DCMAKE_C_COMPILER=/path/to/your/cross-tools/bin/loongarch64-unknown-linux-gnu-gcc \ + -DCMAKE_CXX_COMPILER=/path/to/your/cross-tools/bin/loongarch64-unknown-linux-gnu-g++ +make +``` + +## Install qemu-loongarch64 + +```bash +sudo apt install qemu-user +``` + +## Run + +```bash +qemu-loongarch64 -L /path/to/your/cross-tools/target/usr -E LD_LIBRARY_PATH=/path/to/your/cross-tools/target/usr/lib64 ./iwasm /path/to/wasm_or_aot_file +``` diff --git a/wamr-compiler/CMakeLists.txt b/wamr-compiler/CMakeLists.txt index ab98b03825..9c0713af61 100644 --- a/wamr-compiler/CMakeLists.txt +++ b/wamr-compiler/CMakeLists.txt @@ -115,6 +115,8 @@ elseif (WAMR_BUILD_TARGET MATCHES "AARCH64.*") elseif (WAMR_BUILD_TARGET MATCHES "ARM.*") add_definitions(-DBUILD_TARGET_ARM) add_definitions(-DBUILD_TARGET="${WAMR_BUILD_TARGET}") +elseif (WAMR_BUILD_TARGET STREQUAL "LOONGARCH64" OR WAMR_BUILD_TARGET STREQUAL "LOONGARCH64_LP64D") + add_definitions(-DBUILD_TARGET_LOONGARCH64_LP64D) elseif (WAMR_BUILD_TARGET STREQUAL "RISCV64" OR WAMR_BUILD_TARGET STREQUAL "RISCV64_LP64D")
add_definitions(-DBUILD_TARGET_RISCV64_LP64D) elseif (WAMR_BUILD_TARGET STREQUAL "RISCV64_LP64") @@ -133,6 +135,7 @@ message ("-- Build as target ${WAMR_BUILD_TARGET}") if (CMAKE_SIZEOF_VOID_P EQUAL 8) if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64" + OR WAMR_BUILD_TARGET MATCHES "LOONGARCH64.*" OR WAMR_BUILD_TARGET MATCHES "AARCH64.*" OR WAMR_BUILD_TARGET MATCHES "RISCV64.*") if (NOT WAMR_BUILD_PLATFORM STREQUAL "windows") # Add -fPIC flag if build as 64-bit diff --git a/wamr-compiler/main.c b/wamr-compiler/main.c index 3efe344e6a..79024a5ebd 100644 --- a/wamr-compiler/main.c +++ b/wamr-compiler/main.c @@ -112,13 +112,14 @@ print_help() printf("Usage: wamrc [options] -o output_file wasm_file\n"); printf(" --target= Set the target arch, which has the general format: \n"); printf(" = x86_64, i386, aarch64, arm, thumb, xtensa, mips,\n"); - printf(" riscv64, riscv32.\n"); + printf(" riscv64, riscv32, loongarch64.\n"); printf(" Default is host arch, e.g. x86_64\n"); printf(" = for ex. on arm or thumb: v5, v6m, v7a, v7m, etc.\n"); printf(" Use --target=help to list supported targets\n"); printf(" --target-abi= Set the target ABI, e.g. gnu, eabi, gnueabihf, msvc, etc.\n"); - printf(" Default is gnu if target isn't riscv64 or riscv32\n"); - printf(" For target riscv64 and riscv32, default is lp64d and ilp32d\n"); + printf(" Default is gnu if target isn't riscv64, riscv32 and loongarch64\n"); + printf(" For target riscv64 and loongarch64, default is lp64d\n"); + printf(" For target riscv32, default is ilp32d\n"); printf(" Use --target-abi=help to list all the ABI supported\n"); printf(" --cpu= Set the target CPU (default: host CPU, e.g. skylake)\n"); printf(" Use --cpu=help to list all the CPU supported\n");