diff --git a/arch/Kconfig b/arch/Kconfig index 355a6247af38..8f26fde1cd94 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -112,6 +112,18 @@ config NIOS2 help Nios II Gen 2 architecture +config MICROBLAZE + bool + select ARCH_IS_SET + select ATOMIC_OPERATIONS_C + select GEN_ISR_TABLES + select GEN_SW_ISR_TABLE + select ARCH_HAS_EXTRA_EXCEPTION_INFO + imply DYNAMIC_INTERRUPTS + imply ARCH_HAS_CUSTOM_BUSY_WAIT + help + MicroBlaze architecture + config RISCV bool select ARCH_IS_SET diff --git a/arch/archs.yml b/arch/archs.yml index e07d10ffe80b..b940459ab89e 100644 --- a/arch/archs.yml +++ b/arch/archs.yml @@ -19,3 +19,5 @@ archs: path: xtensa - name: x86 path: x86 + - name: microblaze + path: microblaze diff --git a/arch/microblaze/CMakeLists.txt b/arch/microblaze/CMakeLists.txt new file mode 100644 index 000000000000..ebf4b251a36c --- /dev/null +++ b/arch/microblaze/CMakeLists.txt @@ -0,0 +1,46 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +# Set output binary format +set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT elf32-microblaze) + +add_subdirectory(core) +zephyr_include_directories(include) + +if(DEFINED CONFIG_BIG_ENDIAN) + set(extended_objdump_endianness "-EB") +else() + set(extended_objdump_endianness "-EL") +endif() + +if(DEFINED CONFIG_OUTPUT_DISASSEMBLY) + set_property(GLOBAL APPEND PROPERTY extra_post_build_commands + COMMAND $ + ${extended_objdump_endianness} + "-hSst" + ${disassembly_type} + $${KERNEL_ELF_NAME} + $extended_${KERNEL_LST_NAME} + $ + ) + set_property(GLOBAL APPEND PROPERTY extra_post_build_commands + COMMAND $ + ${extended_objdump_endianness} + "-hSst" + ${disassembly_type} + $zephyr_pre0.elf + $extended_zephyr_pre0.lst + $ + ) + set_property(GLOBAL APPEND PROPERTY extra_post_build_commands + COMMAND $ + ${extended_objdump_endianness} + "-hSst" + ${disassembly_type} + $zephyr_pre1.elf + $extended_zephyr_pre1.lst + $ + ) +endif() diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig new file mode 100644 index 000000000000..087a5a6573dd --- /dev/null +++ b/arch/microblaze/Kconfig @@ -0,0 +1,114 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +menu "MicroBlaze Options" + depends on MICROBLAZE + +config ARCH + string + default "microblaze" + +config CPU_MICROBLAZE + bool + default y + help + This option signifies the use of a MicroBlaze CPU + +config CPU_VERSION + prompt "MicroBlaze CPU Version" + string + default "v9.00.a" + help + Use features of, and schedule code for, the given CPU. + Supported values are in the format "vX.YY.Z", + where X is a major version, YY is the minor version, and Z is compatibility code. + Example values are "v3.00.a", "v4.00.b", "v5.00.a", "v5.00.b", "v6.00.a". + Taken from https://gcc.gnu.org/onlinedocs/gcc/MicroBlaze-Options.html + +config GEN_IRQ_VECTOR_TABLE + bool + default n if MICROBLAZE + help + MicroBlaze has a single interrupt and therefore doesn't have an IRQ vector table. + +config BIG_ENDIAN + bool + default n if MICROBLAZE + help + Our current default endianness is Little-endian. + +config ARCH_SW_ISR_TABLE_ALIGN + prompt "SW_ISR Table Align Size" + default 4 + +config NUM_IRQS + int + default 1 + help + This isn't really a choice either because a barebones MicroBlaze offers + only 1 external interrupt pin (which is usually connected to an Xlnx Intc + or probably a single peripheral which a user wants IRQs from). 
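For illustration, a minimal sketch of how application or SoC code would typically hook this single interrupt line through Zephyr's generic IRQ API; the IRQ number, priority, and handler name below are placeholder assumptions, not part of this patch:

```c
/* Hypothetical example: hooking the single external MicroBlaze IRQ line.
 * The IRQ number, priority and handler names are placeholders.
 */
#include <zephyr/kernel.h>
#include <zephyr/irq.h>

#define MB_EXT_IRQ 0 /* assumed: the one external interrupt line */

static void my_peripheral_isr(const void *arg)
{
	ARG_UNUSED(arg);
	/* Acknowledge the interrupt at the peripheral (or AXI INTC) here. */
}

static int my_peripheral_init(void)
{
	/* Priority is effectively meaningless with a single line; 0 is fine. */
	IRQ_CONNECT(MB_EXT_IRQ, 0, my_peripheral_isr, NULL, 0);
	irq_enable(MB_EXT_IRQ);
	return 0;
}
```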
+ +choice + prompt "Idle Sleep Option" + default MICROBLAZE_IDLE_NOP + + config MICROBLAZE_IDLE_NOP + bool "NOP (no power saving)" + help + Executes pseudo-assembly instruction nop in idle. + Reset_Mode[0:1] is set to 10 + + config MICROBLAZE_IDLE_SLEEP + bool "Sleep" + help + Executes pseudo-assembly instruction sleep in idle. + Reset_Mode[0:1] is set to 10 + + config MICROBLAZE_IDLE_HIBERNATE + bool "Hibernate" + help + Executes pseudo-assembly instruction hibernate in idle. + + config MICROBLAZE_IDLE_SUSPEND + bool "Suspend" + help + Executes pseudo-assembly instruction suspend in idle. +endchoice + +config MICROBLAZE_DUMP_ON_EXCEPTION + bool "Dump core on exceptions" + default y + +config EXTRA_EXCEPTION_INFO + bool "Extra exception debug information" + default y + help + Have exceptions print additional useful debugging information in + human-readable form, at the expense of code size. For example, + the cause code for an exception will be supplemented by a string + describing what that cause code means. + +# Bump the kernel default stack size values. +config MAIN_STACK_SIZE + default 4096 if COVERAGE_GCOV + default 2048 + +config IDLE_STACK_SIZE + default 1024 + +config ISR_STACK_SIZE + default 4096 + +config TEST_EXTRA_STACK_SIZE + default 4096 if COVERAGE_GCOV + default 2048 + +config SYSTEM_WORKQUEUE_STACK_SIZE + default 4096 + +source "arch/microblaze/Kconfig.features" + +endmenu diff --git a/arch/microblaze/Kconfig.features b/arch/microblaze/Kconfig.features new file mode 100644 index 000000000000..670c64818efd --- /dev/null +++ b/arch/microblaze/Kconfig.features @@ -0,0 +1,76 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +menu "HW Feature Options" + depends on MICROBLAZE + +# Picolibc with C++ support in Zephyr SDK is handled by Zephyr SDK's own Kconfig. +config PICOLIBC_SUPPORTED + bool + default n + help + Microblaze doesn't have picolibc support yet + +config MICROBLAZE_USE_MSR_INSTR + prompt "Use msrset & msrclr instructions" + def_bool y + help + This depends on CPU supporting msrset/msrclr instructions. + xparameters would define this as MICROBLAZE_USE_MSR_INSTR = 1. + +config MICROBLAZE_USE_BARREL_SHIFT_INSTR + prompt "Use barrel shift instructions" + def_bool y + help + This depends on CPU supporting barrel shift instructions. + xparameters would define this as USE_BARREL = 1. + +config MICROBLAZE_USE_PATTERN_COMPARE_INSTR + prompt "Use pattern compare instructions" + def_bool y + help + This depends on CPU supporting pattern compare instructions. + xparameters would define this as USE_PCMP = 1. + +config MICROBLAZE_USE_DIV_INSTR + prompt "Use division instructions" + def_bool y + help + This depends on CPU supporting hardware division instructions. + xparameters would define this as USE_DIV = 1. + +config MICROBLAZE_USE_MUL_INSTR + prompt "Use multiplication instructions" + def_bool y + help + This depends on CPU supporting hardware multiplication instructions. + xparameters would define this as USE_HW_MUL >= 1. + +config MICROBLAZE_USE_MULHI_INSTR + prompt "Use mulhi for multiplication of higher bits" + def_bool y + depends on MICROBLAZE_USE_MUL_INSTR + help + Use multiply high instructions for high part of 32x32 multiply. + This depends on CPU supporting hardware high multiplication instructions. + xparameters would define this as USE_HW_MUL = 2. 
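For illustration, a hedged sketch of how these instruction-set selections could be cross-checked at build time against the Vitis-generated xparameters.h the help texts refer to; the XPAR_* macro names are assumptions based on typical MicroBlaze BSPs and may differ per toolchain, and this check is not part of the patch:

```c
/* Hypothetical build-time sanity check; the XPAR_* names are assumptions
 * taken from a typical Vitis/Vivado-generated xparameters.h and may differ.
 */
#include <xparameters.h>

#if defined(CONFIG_MICROBLAZE_USE_MSR_INSTR) && (XPAR_MICROBLAZE_USE_MSR_INSTR == 0)
#error "msrset/msrclr enabled in Kconfig but the CPU was generated without them"
#endif

#if defined(CONFIG_MICROBLAZE_USE_BARREL_SHIFT_INSTR) && (XPAR_MICROBLAZE_USE_BARREL == 0)
#error "barrel shift instructions enabled in Kconfig but the CPU has no barrel shifter"
#endif

#if defined(CONFIG_MICROBLAZE_USE_MULHI_INSTR) && (XPAR_MICROBLAZE_USE_HW_MUL < 2)
#error "mulhi enabled in Kconfig but the CPU was generated with USE_HW_MUL < 2"
#endif
```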
+ +config MICROBLAZE_USE_HARDWARE_FLOAT_INSTR + def_bool n + depends on CPU_HAS_FPU + help + This depends on CPU supporting hardware float instructions. + xparameters would define this as USE_FPU = 1. + +config MICROBLAZE_DATA_IS_TEXT_RELATIVE + bool "Assume data & text segment distance is static" + default y + help + Assume that the displacement between the text and data segments is fixed at + static link time. This allows data to be referenced by offset from start of + text address instead of GOT (r20) since PC-relative addressing is not supported. + Injects -mpic-data-is-text-relative + +endmenu diff --git a/arch/microblaze/core/CMakeLists.txt b/arch/microblaze/core/CMakeLists.txt new file mode 100644 index 000000000000..9f1db4f15452 --- /dev/null +++ b/arch/microblaze/core/CMakeLists.txt @@ -0,0 +1,36 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +zephyr_library() + +zephyr_library_sources( + cache.c + cpu_idle.c + crt0.S + exception.S + fatal.c + irq_manage.c + isr.S + emulate_isr.S + microblaze_disable_exceptions.S + microblaze_disable_dcache.S + microblaze_disable_icache.S + microblaze_disable_interrupts.S + microblaze_enable_exceptions.S + microblaze_enable_interrupts.S + microblaze_enable_dcache.S + microblaze_enable_icache.S + prep_c.c + reboot.c + reset.S + swap.S + thread.c +) + +if (CONFIG_MINIMAL_LIBC) + zephyr_library_sources(sbrk.c) +endif() + +zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c) diff --git a/arch/microblaze/core/cache.c b/arch/microblaze/core/cache.c new file mode 100644 index 000000000000..b9f585689baa --- /dev/null +++ b/arch/microblaze/core/cache.c @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include + +#include +#include +#include + +#include + +#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ARCH_CACHE) +#if defined(CONFIG_DCACHE) + +#define DCACHE_BASE DT_PROP_OR(_CPU, d_cache_base, 0) +#define ICACHE_BASE DT_PROP_OR(_CPU, i_cache_base, 0) + +#define DCACHE_SIZE DT_PROP_OR(_CPU, d_cache_size, 0) +#define ICACHE_SIZE DT_PROP_OR(_CPU, i_cache_size, 0) + +#define DCACHE_USE_WRITEBACK DT_PROP_OR(_CPU, d_cache_use_writeback, 0) + +/** + * @brief Enable the d-cache + * + * Enable the data cache. + */ +void arch_dcache_enable(void) +{ + microblaze_enable_dcache(); +} + +/** + * @brief Disable the d-cache + * + * Disable the data cache. + * It might be a good idea to flush the cache before disabling. + */ +void arch_dcache_disable(void) +{ + microblaze_disable_dcache(); +} + +/** + * @brief Flush the d-cache + * + * Flush the whole data cache. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_dcache_flush_all(void) +{ + return arch_dcache_flush_range(DCACHE_BASE, DCACHE_SIZE); +} + +/** + * @brief Invalidate the d-cache + * + * Invalidate the whole data cache. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_dcache_invd_all(void) +{ + return arch_dcache_invd_range(DCACHE_BASE, DCACHE_SIZE); +} + +/** + * @brief Flush and Invalidate the d-cache + * + * Flush and Invalidate the whole data cache. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. 
+ */ +int arch_dcache_flush_and_invd_all(void) +{ + return arch_dcache_flush_and_invd_range(DCACHE_BASE, DCACHE_SIZE); +} + +/** + * @brief Flush an address range in the d-cache + * + * Flush the specified address range of the data cache. + * + * @param addr Starting address to flush. + * @param size Range size. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_dcache_flush_range(void *addr, size_t size) +{ + /* if ! MICROBLAZE_DCACHE_USE_WRITEBACK then + * CPU doesn't support flushing without invalidating. + */ + if (0 == DCACHE_USE_WRITEBACK) + return -ENOTSUP; + + const size_t incrementer = 4 * sys_cache_data_line_size_get(); + const intptr_t end_addr = (intptr_t)addr + size; + /* Aligning start address */ + intptr_t address_iterator = (intptr_t)addr & (-incrementer); + + /* We only need to iterate up to the cache size */ + while (address_iterator < end_addr) { + wdc_flush(address_iterator); + address_iterator += incrementer; + } + + return 0; +} + +/** + * @brief Invalidate an address range in the d-cache + * + * Invalidate the specified address range of the data cache. + * + * @param addr Starting address to invalidate. + * @param size Range size. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_dcache_invd_range(void *addr, size_t size) +{ + const uint32_t incrementer = 4 * sys_cache_data_line_size_get(); + const size_t end_addr = (intptr_t)addr + size; + + /* Aligning start address */ + intptr_t address_iterator = (intptr_t)addr & (-incrementer); + + /* We only need to iterate up to the cache size */ + while (address_iterator < end_addr) { + wic(address_iterator); + address_iterator += incrementer; + } + + return 0; +} + +/** + * @brief Flush and Invalidate an address range in the d-cache + * + * Flush and Invalidate the specified address range of the data cache. + * + * @param addr Starting address to flush and invalidate. + * @param size Range size. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_dcache_flush_and_invd_range(void *addr, size_t size) +{ + const uint32_t incrementer = 4 * sys_cache_data_line_size_get(); + const size_t end_addr = (intptr_t)addr + size; + + /* Aligning start address */ + intptr_t address_iterator = (intptr_t)addr & (-incrementer); + + /* We only need to iterate up to the cache size */ + while (address_iterator < end_addr) { + wdc_clear(address_iterator); + address_iterator += incrementer; + } + + return 0; +} + +#endif /* CONFIG_DCACHE */ + +#if defined(CONFIG_ICACHE) +/** + * @brief Enable the i-cache + * + * Enable the instruction cache. + */ +void arch_icache_enable(void) +{ + microblaze_enable_icache(); +} + +/** + * @brief Disable the i-cache + * + * Disable the instruction cache. + */ +void arch_icache_disable(void) +{ + microblaze_disable_icache(); +} + +/** + * @brief Flush the i-cache + * + * Flush the whole instruction cache. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_icache_flush_all(void) +{ + return -ENOTSUP; +} + +/** + * @brief Invalidate the i-cache + * + * Invalidate the whole instruction cache. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. 
+ */ +int arch_icache_invd_all(void) +{ + return arch_icache_invd_range(ICACHE_BASE, ICACHE_SIZE); +} + +/** + * @brief Flush and Invalidate the i-cache + * + * Flush and Invalidate the whole instruction cache. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_icache_flush_and_invd_all(void) +{ + return -ENOTSUP; +} + +/** + * @brief Flush an address range in the i-cache + * + * Flush the specified address range of the instruction cache. + * + * @param addr Starting address to flush. + * @param size Range size. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_icache_flush_range(void *addr, size_t size) +{ + ARG_UNUSED(addr); + ARG_UNUSED(size); + + return -ENOTSUP; +} + +/** + * @brief Invalidate an address range in the i-cache + * + * Invalidate the specified address range of the instruction cache. + * + * @param addr Starting address to invalidate. + * @param size Range size. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_icache_invd_range(void *addr, size_t size) +{ + int key = irq_lock(); + + arch_icache_disable(); + + const uint32_t incrementer = 4 * sys_cache_instr_line_size_get(); + const size_t end_addr = (intptr_t)addr + size; + + /* Aligning start address */ + intptr_t address_iterator = (intptr_t)addr & (-incrementer); + + /* We only need to iterate up to the cache size */ + while (address_iterator < end_addr) { + wic(address_iterator); + address_iterator += incrementer; + } + + arch_icache_enable(); + irq_unlock(key); + + return 0; +} + +/** + * @brief Flush and Invalidate an address range in the i-cache + * + * Flush and Invalidate the specified address range of the instruction cache. + * + * @param addr Starting address to flush and invalidate. + * @param size Range size. + * + * @retval 0 If succeeded. + * @retval -ENOTSUP If not supported. + * @retval -errno Negative errno for other failures. + */ +int arch_icache_flush_and_invd_range(void *addr, size_t size) +{ + ARG_UNUSED(addr); + ARG_UNUSED(size); + + return -ENOTSUP; +} + +#endif /* CONFIG_ICACHE */ +#endif /* CONFIG_CACHE_MANAGEMENT && CONFIG_ARCH_CACHE */ diff --git a/arch/microblaze/core/cpu_idle.c b/arch/microblaze/core/cpu_idle.c new file mode 100644 index 000000000000..c3bc748a5e75 --- /dev/null +++ b/arch/microblaze/core/cpu_idle.c @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include +#include + +static ALWAYS_INLINE void microblaze_idle(unsigned int key) +{ + sys_trace_idle(); + + /* wait for interrupt */ +#if defined(CONFIG_MICROBLAZE_IDLE_SLEEP) + { + __asm__ __volatile__("sleep\t"); + } +#elif defined(CONFIG_MICROBLAZE_IDLE_HIBERNATE) + { + __asm__ __volatile__("hibernate\t"); + } +#elif defined(CONFIG_MICROBLAZE_IDLE_SUSPEND) + { + __asm__ __volatile__("suspend\t"); + } +#elif defined(CONFIG_MICROBLAZE_IDLE_NOP) + { + __asm__ __volatile__("nop\t"); + } +#endif + /* unlock interrupts */ + irq_unlock(key); +} + +void arch_cpu_idle(void) +{ + microblaze_idle(1); +} + +void arch_cpu_atomic_idle(unsigned int key) +{ + microblaze_idle(key); +} + +/** + * @brief Defined weak so SoCs/Boards with timers can override. 
+ * This is an approximate busy wait implementation that executes + * a number of NOPs to obtain an approximate of the desired delay. + * + * @param usec_to_wait + */ +static void ALWAYS_INLINE arch_busy_wait_1ms(void) +{ +#define LOOP_LIMIT (427ULL * 200000000ULL / CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) + for (uint64_t counter = 0; counter < LOOP_LIMIT; counter++) { + arch_nop(); + } +} + +__weak void arch_busy_wait(uint32_t usec_to_wait) +{ + for (uint32_t msecs = 0; msecs < usec_to_wait / USEC_PER_MSEC; msecs++) { + arch_busy_wait_1ms(); + } +} diff --git a/arch/microblaze/core/crt0.S b/arch/microblaze/core/crt0.S new file mode 100644 index 000000000000..c8f21a00e1f6 --- /dev/null +++ b/arch/microblaze/core/crt0.S @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + MicroBlaze Vector Map for standalone executables + + Address Vector type Label + ------- ----------- ------ + + # 0x00 # (-- IMM --) + # 0x04 # Reset _start + + # 0x08 # (-- IMM --) + # 0x0c # Software Exception _exception_handler + + # 0x10 # (-- IMM --) + # 0x14 # Hardware Interrupt _interrupt_handler + + # 0x18 # (-- IMM --) + # 0x1C # Breakpoint Exception (-- Don't Care --) + + # 0x20 # (-- IMM --) + # 0x24 # Hardware Exception _hw_exception_handler + +*/ + +#include +#include + +/* imports */ +GTEXT(_PrepC) + + .section .vectors.sw_exception, "ax" + .balign 4 +_vector_sw_exception: + brai _exception_handler_entry + + .section .vectors.interrupt, "ax" + .balign 4 +_vector_interrupt: + brai _interrupt_handler + + .section .vectors.hw_exception, "ax" + .balign 4 +_vector_hw_exception: + brai _exception_handler_entry + + .section .text + .globl _start + .balign 4 + .ent _start + .type _start, @function + +_start: + /* Set the Small Data Anchors and the stack pointer */ + ori r13, r0, _SDA_BASE_ + ori r2, r0, _SDA2_BASE_ + + /* Initialize global pointer with the linker variable we set */ + ori r20, r0, _gp + +#ifdef CONFIG_INIT_STACKS + /* Pre-populate all bytes in z_interrupt_stacks with 0xAA */ + ori r3, r0, z_interrupt_stacks + addik r4, r3, __z_interrupt_stack_SIZEOF + ori r5, r0, 0xaaaaaaaa + + /* Populate z_interrupt_stacks with 0xaaaaaaaa */ +aa_loop: + sw r5, r0, r3 + addik r3, r3, 4 + cmpu r6, r3, r4 + bnei r6, aa_loop +#endif + + /* Load the initial stack */ + ori r1, r0, z_interrupt_stacks + addik r1, r1, __z_interrupt_stack_SIZEOF + +/* Initialize BSS and run program */ + /* clear SBSS */ + addi r6, r0, __sbss_start + addi r7, r0, __sbss_end + rsub r18, r6, r7 + blei r18, .Lendsbss + +.Lloopsbss: + swi r0, r6, 0 + addi r6, r6, 4 + rsub r18, r6, r7 + bgti r18, .Lloopsbss +.Lendsbss: + +/* bss region is cleaned up by _PrepC */ + + addi r6, r0, 0 /* Initialize argc = 0 */ + addi r7, r0, 0 /* Set envp = NULL*/ + brlid r15, _PrepC /* Execute the program */ + addi r5, r0, 0 /* Set argv = NULL */ + + /* Call exit with the return value of main */ + brlid r15, _exit + addik r5, r3, 0 + + /* Control does not reach here */ + .end _start diff --git a/arch/microblaze/core/emulate_isr.S b/arch/microblaze/core/emulate_isr.S new file mode 100644 index 000000000000..2205c1ed5fb2 --- /dev/null +++ b/arch/microblaze/core/emulate_isr.S @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#include +#include +#include + + +.extern _interrupt_handler +.globl microblaze_emulate_isr + +/* void microblaze_emulate_isr(void) + * Must be called with irqs unlocked! + * Must be called via inline asm with r14 as link register + */ +SECTION_FUNC(exception.other, microblaze_emulate_isr) + .ent microblaze_emulate_isr + /* Set the interrupt return address to after the + * call to microblaze_emulate_isr */ + ADD_IMM(r14, 4) + brai _interrupt_handler + .end microblaze_emulate_isr diff --git a/arch/microblaze/core/exception.S b/arch/microblaze/core/exception.S new file mode 100644 index 000000000000..8872d061ab8b --- /dev/null +++ b/arch/microblaze/core/exception.S @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include +#include +#include +#include +#include + + +/* import */ +.extern stack_pointer_on_exception_entry +.extern _Fault + +/* export */ +.global _exception_handler_entry +.global _asm_stack_failed + + + .text + .balign 4 + +_exception_handler_entry: + + /* Make room for the context on the stack. */ + STACK_ALLOC(__struct_arch_esf_SIZEOF) + ASSERT_GT_ZERO(r1, _asm_stack_failed) + + /* Take a copy of the stack pointer at the moment w/ context stored, + storing its value prior to the function stack frame being created. */ + STORE_REG_TO_ADDR(r1, stack_pointer_on_exception_entry) + + /* Stack MSR using r11(temp1) */ + mfs TEMP_DATA_REG, rmsr + STORE_TO_STACK(TEMP_DATA_REG, ESF_OFFSET(msr)) + + #if defined(CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) + /* Stack FSR using TEMP_DATA_REG(temp1) */ + mfs TEMP_DATA_REG, rfsr + STORE_TO_STACK(TEMP_DATA_REG, ESF_OFFSET(fsr)) + #endif + + PUSH_CONTEXT_TO_STACK(r31) + PUSH_CONTEXT_TO_STACK(r30) + PUSH_CONTEXT_TO_STACK(r29) + PUSH_CONTEXT_TO_STACK(r28) + PUSH_CONTEXT_TO_STACK(r27) + PUSH_CONTEXT_TO_STACK(r26) + PUSH_CONTEXT_TO_STACK(r25) + PUSH_CONTEXT_TO_STACK(r24) + PUSH_CONTEXT_TO_STACK(r23) + PUSH_CONTEXT_TO_STACK(r22) + PUSH_CONTEXT_TO_STACK(r21) + PUSH_CONTEXT_TO_STACK(r20) + PUSH_CONTEXT_TO_STACK(r19) + PUSH_CONTEXT_TO_STACK(r18) + PUSH_CONTEXT_TO_STACK(r17) + PUSH_CONTEXT_TO_STACK(r16) + PUSH_CONTEXT_TO_STACK(r15) + PUSH_CONTEXT_TO_STACK(r14) + PUSH_CONTEXT_TO_STACK(r13) + PUSH_CONTEXT_TO_STACK(r12) + PUSH_CONTEXT_TO_STACK(r11) + PUSH_CONTEXT_TO_STACK(r10) + PUSH_CONTEXT_TO_STACK(r9) + PUSH_CONTEXT_TO_STACK(r8) + PUSH_CONTEXT_TO_STACK(r7) + PUSH_CONTEXT_TO_STACK(r6) + PUSH_CONTEXT_TO_STACK(r5) + PUSH_CONTEXT_TO_STACK(r4) + PUSH_CONTEXT_TO_STACK(r3) + PUSH_CONTEXT_TO_STACK(r2) + + mfs r5, resr + mfs r6, rear + mfs r7, redr + + braid _Fault + nop + + .text + .balign 4 + +_asm_stack_failed: +/* Should always be called with interrupts disabled + * so that ISR doesn't overwrite irq stack + * Currently only jumped from isr and swap entry. + */ + + /* stack has failed so we immediately need to switch to an emergency stack */ + SET_REG(r1, z_interrupt_stacks) + braid _exception_handler_entry + STACK_FREE(__z_interrupt_stack_SIZEOF) diff --git a/arch/microblaze/core/fatal.c b/arch/microblaze/core/fatal.c new file mode 100644 index 000000000000..a959c88971ad --- /dev/null +++ b/arch/microblaze/core/fatal.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#include +#include +#include +#include + +#include + +/* Hardware includes. */ +#include "microblaze/mb_interface.h" +#include "microblaze/microblaze_regs.h" + +LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); + +/* The number of bytes a MicroBlaze instruction consumes. */ +#define MICROBLAZE_INSTRUCTION_SIZE 4 + +extern void _exception_handler_entry(void *exception_id); + +/* Used by assembly routine _exception_handler_entry to store sp */ +void *stack_pointer_on_exception_entry; + +FUNC_NORETURN void z_microblaze_fatal_error(unsigned int reason, + const microblaze_register_dump_t *dump) +{ + if (dump != NULL && IS_ENABLED(CONFIG_MICROBLAZE_DUMP_ON_EXCEPTION)) { + printk("r1:\t0x%08x\t(sp)\n", dump->esf.r1); + printk("r2:\t0x%08x\t(small data area)\n", dump->esf.r2); + printk("r3:\t0x%x\t\t(retval 1)\n", dump->esf.r3); + printk("r4:\t0x%x\t\t(retval 2)\n", dump->esf.r4); + printk("r5:\t0x%x\t\t(arg1)\n", dump->esf.r5); + printk("r6:\t0x%x\t\t(arg2)\n", dump->esf.r6); + printk("r7:\t0x%x\t\t(arg3)\n", dump->esf.r7); + printk("r8:\t0x%x\t\t(arg4)\n", dump->esf.r8); + printk("r9:\t0x%x\t\t(arg5)\n", dump->esf.r9); + printk("r10:\t0x%x\t\t(arg6)\n", dump->esf.r10); + printk("r11:\t0x%08x\t(temp1)\n", dump->esf.r11); + printk("r12:\t0x%08x\t(temp2)\n", dump->esf.r12); + printk("r13:\t0x%08x\t(rw small data area)\n", dump->esf.r13); + printk("r14:\t0x%08x\t(return from interrupt)\n", dump->esf.r14); + printk("r15:\t0x%08x\t(return from subroutine)\n", dump->esf.r15); + printk("r16:\t0x%08x\t(return from trap)\n", dump->esf.r16); + printk("r17:\t0x%08x\t(return from exception)\n", dump->esf.r17); + printk("r18:\t0x%08x\t(compiler/assembler temp)\n", dump->esf.r18); + printk("r19:\t0x%08x\t(global offset table ptr)\n", dump->esf.r19); + printk("r20:\t0x%x\n", dump->esf.r20); + printk("r21:\t0x%x\n", dump->esf.r21); + printk("r22:\t0x%x\n", dump->esf.r22); + printk("r23:\t0x%x\n", dump->esf.r23); + printk("r24:\t0x%x\n", dump->esf.r24); + printk("r25:\t0x%x\n", dump->esf.r25); + printk("r26:\t0x%x\n", dump->esf.r26); + printk("r27:\t0x%x\n", dump->esf.r27); + printk("r28:\t0x%x\n", dump->esf.r28); + printk("r29:\t0x%x\n", dump->esf.r29); + printk("r30:\t0x%x\n", dump->esf.r30); + printk("r31:\t0x%x\n", dump->esf.r31); + + printk("MSR:\t0x%08x\t(exc)\n", dump->esf.msr); +#if defined(CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) + printk("FSR:\t%08x\n", dump->esf.fsr); +#endif + printk("ESR:\t0x%08x\n", dump->esr); + printk("EAR:\t0x%x\n", dump->ear); + printk("EDR:\t0x%x\n", dump->edr); + printk("PC:\t0x%x\n", dump->pc); + } + + /* This hack allows us to re-enable exceptions properly before continuing. + * r15 is safe to use becaue this function is noreturn. 
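+	 * (The rted below branches to r15 + 0x8, i.e. the trailing nop of this
+	 * sequence, so execution simply continues past it with exceptions re-enabled.)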
+	 */
+	__asm__ volatile("\tmfs r15, rpc\n"
+			 "\trted r15, 0x8\n"
+			 "\tnop\n");
+
+	printk("MSR:\t0x%08x\t(%s)\n", mfmsr(), __func__);
+
+	z_fatal_error(reason, &dump->esf);
+	CODE_UNREACHABLE;
+}
+
+#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
+static char *cause_str(uint32_t cause)
+{
+	switch (cause) {
+	case 0:
+		return "stream exception";
+	case 1:
+		return "unaligned data access exception";
+	case 2:
+		return "illegal op-code exception";
+	case 3:
+		return "instruction bus error exception";
+	case 4:
+		return "data bus error exception";
+	case 5:
+		return "divide exception";
+	case 6:
+		return "floating point unit exception";
+	case 7:
+		return "privileged instruction exception";
+	case 8:
+		return "stack protection violation exception";
+	case 9:
+		return "data storage exception";
+	case 10:
+		return "instruction storage exception";
+	case 11:
+		return "data TLB miss exception";
+	case 12:
+		return "instruction TLB miss exception";
+	default:
+		return "unknown";
+	}
+}
+#endif /* defined(CONFIG_EXTRA_EXCEPTION_INFO) */
+
+FUNC_NORETURN void _Fault(uint32_t esr, uint32_t ear, uint32_t edr)
+{
+	static microblaze_register_dump_t microblaze_register_dump = {0};
+	/* Log the simplest possible exception information before anything */
+	uint32_t cause = (mfesr() & CAUSE_EXP_MASK) >> CAUSE_EXP_SHIFT;
+
+	LOG_ERR("");
+#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
+	LOG_ERR("Cause: %d, %s", cause, cause_str(cause));
+#endif /* defined(CONFIG_EXTRA_EXCEPTION_INFO) */
+
+	/* Fill a register dump structure with the MicroBlaze context as it
+	 * was immediately before the exception occurrence.
+	 */
+
+	__ASSERT_NO_MSG(stack_pointer_on_exception_entry);
+	struct arch_esf *sp_ptr = (struct arch_esf *)stack_pointer_on_exception_entry;
+
+	/* Obtain the values of registers that were stacked prior to this function
+	 * being called, and may have changed since they were stacked.
+	 */
+	microblaze_register_dump.esf = *sp_ptr;
+	microblaze_register_dump.esf.r1 = ((uint32_t)sp_ptr) + sizeof(struct arch_esf);
+	microblaze_register_dump.esr = esr;
+	microblaze_register_dump.ear = ear;
+	microblaze_register_dump.edr = edr;
+
+	/* Move the saved program counter back to the instruction that was executed
+	 * when the exception occurred. This is only valid for certain types of
+	 * exception.
+	 */
+	microblaze_register_dump.pc =
+		microblaze_register_dump.esf.r17 - MICROBLAZE_INSTRUCTION_SIZE;
+
+	/* Also fill in a string that describes what type of exception this is.
+	 * The string uses the same ID names as defined in the MicroBlaze standard
+	 * library exception header files.
+	 */
+#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
+	microblaze_register_dump.exception_cause_str = cause_str(cause);
+#endif /* defined(CONFIG_EXTRA_EXCEPTION_INFO) */
+
+	z_microblaze_fatal_error(K_ERR_CPU_EXCEPTION, &microblaze_register_dump);
+	CODE_UNREACHABLE;
+}
diff --git a/arch/microblaze/core/irq_manage.c b/arch/microblaze/core/irq_manage.c
new file mode 100644
index 000000000000..c1c8a2bbe6da
--- /dev/null
+++ b/arch/microblaze/core/irq_manage.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); + +static uint32_t emulated_irq_pending; + +FUNC_NORETURN void z_irq_spurious(const void *param) +{ + LOG_ERR("Spurious interrupt detected!\n" + "\tmsr: %x\n" + "\tesr: %x\n" + "\tear: %x\n" + "\tedr: %x\n" + "\tparam: %x\n", + (unsigned int)mfmsr(), (unsigned int)mfesr(), (unsigned int)mfear(), + (unsigned int)mfedr(), (unsigned int)param); + + z_microblaze_fatal_error(K_ERR_SPURIOUS_IRQ, NULL); + CODE_UNREACHABLE; +} + +/** + * @brief Returns if IE bit is enabled + * Defined weak so soc/boards can override it. + */ +__weak int arch_irq_is_enabled(unsigned int irq) +{ + ARG_UNUSED(irq); + return mfmsr() & MSR_IE_MASK; +} + +/** + * @brief Simply unlocks all IRQs. + * Defined weak so soc/boards can override it. + */ +__weak void arch_irq_enable(unsigned int irq) +{ + ARG_UNUSED(irq); + arch_irq_unlock(1); +}; + +/** + * @brief Simply locks all IRQs. + * Defined weak so soc/boards can override it. + */ +__weak void arch_irq_disable(unsigned int irq) +{ + ARG_UNUSED(irq); + arch_irq_lock(); +}; + +/** + * @brief Returns the currently pending interrupts. + * This function should be overridden if an AXI interrupt + * controller is placed inside the SoC. + * Since there's no way to tell if a barebones MicroBlaze is + * pending on an interrupt signal, this function will return 1 on first call, + * and returns 0 on second call, which is enough to + * make the _enter_irq break its while(ipending) loop. + * This is a logically correct hack to make _enter_irq below work for + * barebones MicroBlaze without introducing extra logic to the _enter_irq logic. + * Obviously, this function shouldn't be used for generic purposes and is merely + * a weak stub for soc/boards to override with more meaningful implementations. + * + * @return Pending IRQ bitmask. Pending IRQs will have their bitfield set to 1. 0 if no interrupt is + * pending. + */ +__weak uint32_t arch_irq_pending(void) +{ + static uint32_t call_count; + + /* xor with 1 should simply invert the value */ + call_count ^= 1; + return call_count; +}; + +__weak uint32_t arch_irq_pending_vector(uint32_t irq_pending) +{ + return find_lsb_set(irq_pending) - 1; +} + +/** + * @brief * Even in the presence of an interrupt controller, once the real mode + * is enabled, there is no way to emulate hw interrupts. So this routine provides + * a software "triggering" capability to MicroBlaze. This routine MUST be called + * either with IRQs locked or interrupts disabled otherwise a real IRQ could fire. + * Also see emulate_isr.h + * + * @param irq IRQ number to enable. + * @return uint32_t returns the final emulated irq_pending mask. 
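+ * @note Usage sketch: with interrupts disabled, call arch_irq_set_emulated_pending(n)
+ *       and then trigger EMULATE_ISR() (see emulate_isr.h); _enter_irq() below will
+ *       dispatch the handler registered for n in _sw_isr_table and clear the
+ *       emulated bit afterwards.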
+ */ +ALWAYS_INLINE uint32_t arch_irq_set_emulated_pending(uint32_t irq) +{ + return emulated_irq_pending |= BIT(irq); +} + +/** + * @brief Called by _interrupt_handler in isr.S + */ +void _enter_irq(void) +{ + _kernel.cpus[0].nested++; + +#ifdef CONFIG_IRQ_OFFLOAD + z_irq_do_offload(); +#endif + + uint32_t real_irq_pending = 0; + + while (true) { + struct _isr_table_entry *ite; + + real_irq_pending = arch_irq_pending(); + + if (real_irq_pending == 0 && emulated_irq_pending == 0) { + break; + } + +#ifdef CONFIG_TRACING_ISR + sys_trace_isr_enter(); +#endif + + /* From pg099 AXI Interrupt Controller (INTC) product guide: + * The least significant bit (LSB, in this case bit 0) has the highest priority. + */ + const uint32_t index = (real_irq_pending != 0) + ? arch_irq_pending_vector(real_irq_pending) + : find_lsb_set(emulated_irq_pending) - 1; + + ite = &_sw_isr_table[index]; + + ite->isr(ite->arg); + + /* In this implementation it's the ISR's responsibility to clear irq flags. + * But _enter_irq does clear the emulated IRQs automatically since this is a port + * provided functionality and also needed to pass unit tests without altering tests. + */ + emulated_irq_pending &= ~BIT(index); + +#ifdef CONFIG_TRACING_ISR + sys_trace_isr_exit(); +#endif + } + + _kernel.cpus[0].nested--; +#ifdef CONFIG_STACK_SENTINEL + z_check_stack_sentinel(); +#endif +} + +#ifdef CONFIG_DYNAMIC_INTERRUPTS +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, + void (*routine)(const void *parameter), const void *parameter, + uint32_t flags) +{ + ARG_UNUSED(flags); + ARG_UNUSED(priority); + + z_isr_install(irq, routine, parameter); + return irq; +} +#endif /* CONFIG_DYNAMIC_INTERRUPTS */ diff --git a/arch/microblaze/core/irq_offload.c b/arch/microblaze/core/irq_offload.c new file mode 100644 index 000000000000..13a4db0f40a7 --- /dev/null +++ b/arch/microblaze/core/irq_offload.c @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#include +#include +#include + +#include + +#pragma message "MicroBlaze irq_offload is experimental" + +volatile irq_offload_routine_t _offload_routine; +static volatile const void *offload_param; + +/* Called by _enter_irq if regardless of pending irqs. + * Just in case the offload routine itself reenables & generates + * an interrupt, clear the offload_routine global before executing. + */ +void z_irq_do_offload(void) +{ + irq_offload_routine_t tmp; + + if (!_offload_routine) { + return; + } + + tmp = _offload_routine; + _offload_routine = NULL; + + tmp((const void *)offload_param); +} + +void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) +{ + microblaze_disable_interrupts(); + + _offload_routine = routine; + offload_param = parameter; + + EMULATE_ISR(); +} + +void arch_irq_offload_init(void) +{ +} diff --git a/arch/microblaze/core/isr.S b/arch/microblaze/core/isr.S new file mode 100644 index 000000000000..d834d9b6f9d6 --- /dev/null +++ b/arch/microblaze/core/isr.S @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include +#include +#include +#include +#include + +/* import */ +.extern _asm_stack_failed + +/* export */ +.global _interrupt_handler + +/* The context is oversized to allow functions called from the ISR to write + * back into the caller stack. 
The size is defined as __struct_arch_esf_SIZEOF in offsets.c + */ + +/* Offsets from the stack pointer at which saved registers are placed. + * These are generated using the definitions in offsets.c + */ + + .text + .balign 4 + +_interrupt_handler: + /* Make room for the context on the stack. */ + STACK_ALLOC(__struct_arch_esf_SIZEOF) + ASSERT_GT_ZERO(r1, _asm_stack_failed) + + PUSH_CONTEXT_TO_STACK(r31) + PUSH_CONTEXT_TO_STACK(r30) + PUSH_CONTEXT_TO_STACK(r29) + PUSH_CONTEXT_TO_STACK(r28) + PUSH_CONTEXT_TO_STACK(r27) + PUSH_CONTEXT_TO_STACK(r26) + PUSH_CONTEXT_TO_STACK(r25) + PUSH_CONTEXT_TO_STACK(r24) + PUSH_CONTEXT_TO_STACK(r23) + PUSH_CONTEXT_TO_STACK(r22) + PUSH_CONTEXT_TO_STACK(r21) + PUSH_CONTEXT_TO_STACK(r20) + PUSH_CONTEXT_TO_STACK(r19) + PUSH_CONTEXT_TO_STACK(r18) + PUSH_CONTEXT_TO_STACK(r17) + PUSH_CONTEXT_TO_STACK(r16) + /* Stack the return address from user sub-routines */ + PUSH_CONTEXT_TO_STACK(r15) + /* Stack the return address from interrupt */ + PUSH_CONTEXT_TO_STACK(r14) + PUSH_CONTEXT_TO_STACK(r13) + PUSH_CONTEXT_TO_STACK(r12) + PUSH_CONTEXT_TO_STACK(r11) + PUSH_CONTEXT_TO_STACK(r10) + PUSH_CONTEXT_TO_STACK(r9) + PUSH_CONTEXT_TO_STACK(r8) + PUSH_CONTEXT_TO_STACK(r7) + PUSH_CONTEXT_TO_STACK(r6) + PUSH_CONTEXT_TO_STACK(r5) + PUSH_CONTEXT_TO_STACK(r4) + PUSH_CONTEXT_TO_STACK(r3) + PUSH_CONTEXT_TO_STACK(r2) + + /* Stack MSR using r11(temp1) */ + mfs TEMP_DATA_REG, rmsr + STORE_TO_STACK(TEMP_DATA_REG, ESF_OFFSET(msr)) + + #if defined(CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) + /* Stack FSR using TEMP_DATA_REG(temp1) */ + mfs TEMP_DATA_REG, rfsr + STORE_TO_STACK(TEMP_DATA_REG, ESF_OFFSET(fsr)) + #endif + + /* Get a reference to _kernel in r11 (temp1) -> r11 = &_kernel */ + SET_REG(KERNEL_REF_REG, _kernel) + /* Get a reference to current thread in r12 (temp2): r12 = *(&kernel+offsetof(current)) */ + LOAD_CURRENT_THREAD(CURRENT_THREAD_REG) + /* Save the stack pointer to current thread */ + STORE_TO_CURRENT_THREAD(r1, _thread_offset_to_r1) + /* Write 1 to key to indicate that IRQs are currently unlocked */ + SET_REG(TEMP_DATA_REG, 1) + STORE_TO_CURRENT_THREAD(TEMP_DATA_REG, _thread_offset_to_key) + /* Write 1 to preempted to indicate this thread yielded from ISR */ + STORE_TO_CURRENT_THREAD(TEMP_DATA_REG, _thread_offset_to_preempted) + + /* Switch to the ISR stack: r1 = *(&_kernel+offsetof(irq_stack)) */ + SWITCH_TO_IRQ_STACK(r1) + /* r1 (sp) now points to cpu dedicated irq stack */ + + /* Assert ISR stack is sane */ + ASSERT_GT_ZERO(r1, _asm_stack_failed) + +on_irq_stack: + /* Enter C interrupt handling code. + * Execute any pending interrupts. 
+ */ + CALL(_enter_irq, \ + DELAY_SLOT(nop)) + + /* Interrupt handler finished and the interrupt should be serviced + * now, the appropriate bits in ipending should be cleared + */ + +#ifdef CONFIG_PREEMPT_ENABLED + + /* Get a reference to _kernel again in r11 (temp1) = &_kernel */ + SET_REG(KERNEL_REF_REG, _kernel) + /* Get a reference to current thread in r12 (temp2) = *(&kernel+offsetof(current)) */ + LOAD_CURRENT_THREAD(CURRENT_THREAD_REG) + /* Get a reference to ready_q.cache in r4 (retval1) = *(&_kernel+offsetof(ready_q.cache)) */ + LOAD_NEXT_THREAD(NEXT_THREAD_REG) + /* Check to see if a scheduling decision is necessary + * by comparing current vs kernel.ready_q.cache threads */ + cmpu CURRENT_THREAD_REG, NEXT_THREAD_REG, CURRENT_THREAD_REG // r12 should be 0 if r4==r12 + /* Delay slot instruction has no effect if they're equal */ + JUMP_IF_ZERO(CURRENT_THREAD_REG, _isr_restore_and_exit, \ + DELAY_SLOT(COPY_REG(CURRENT_THREAD_REG, NEXT_THREAD_REG))) + + /* A context reschedule is required */ +#if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) + CALL(z_thread_mark_switched_out, \ + DELAY_SLOT(nop)) +#endif + /* Get a reference to _kernel again in r11 (temp1) = &_kernel */ + SET_REG(KERNEL_REF_REG, _kernel) + /* Get a reference to ready_q.cache in r4 (retval1) = *(&_kernel+offsetof(ready_q.cache)) */ + LOAD_NEXT_THREAD(NEXT_THREAD_REG) + /* Scheduler seems to run whenever a task is about to yield either + * because of voluntary yield or blocking. So it runs in "spare time". + * Definitely not supposed to be in ISR so we're OK but just in case + */ + WRITE_TO_KERNEL_CURRENT(NEXT_THREAD_REG) + /* _kernel.current is now equal to _kernel.ready_q.cache */ +#if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) + CALL(z_thread_mark_switched_in, \ + DELAY_SLOT(nop)) +#endif +#endif /* ifdef CONFIG_PREEMPT_ENABLED */ + +_isr_restore_and_exit: + /* Get a reference to _kernel again in r11 (temp1) = &_kernel */ + SET_REG(KERNEL_REF_REG, _kernel) + /* Get a reference to current thread in r12 (temp2) = *(&kernel+offsetof(current)) */ + LOAD_CURRENT_THREAD(CURRENT_THREAD_REG) + /* Grab the stack pointer from "new" current thread */ + LOAD_FROM_CURRENT_THREAD(r1, _thread_offset_to_r1) + LOAD_FROM_CURRENT_THREAD(TEMP_DATA_REG, _thread_offset_to_preempted) + /* skip set retval if preempted == 1 */ + JUMP_IF_NONZERO(TEMP_DATA_REG, _isr_skip_set_retval, \ + DELAY_SLOT(POP_CONTEXT_FROM_STACK(r3))) + + /* Load return value into r3 (returnval1). 
+ * -EAGAIN unless someone previously called arch_thread_return_value_set() + */ + LOAD_FROM_CURRENT_THREAD(r3, _thread_offset_to_retval) + +_isr_skip_set_retval: + + POP_CONTEXT_FROM_STACK(r31) + POP_CONTEXT_FROM_STACK(r30) + POP_CONTEXT_FROM_STACK(r29) + POP_CONTEXT_FROM_STACK(r28) + POP_CONTEXT_FROM_STACK(r27) + POP_CONTEXT_FROM_STACK(r26) + POP_CONTEXT_FROM_STACK(r25) + POP_CONTEXT_FROM_STACK(r24) + POP_CONTEXT_FROM_STACK(r23) + POP_CONTEXT_FROM_STACK(r22) + POP_CONTEXT_FROM_STACK(r21) + POP_CONTEXT_FROM_STACK(r20) + POP_CONTEXT_FROM_STACK(r19) + POP_CONTEXT_FROM_STACK(r18) + POP_CONTEXT_FROM_STACK(r17) + POP_CONTEXT_FROM_STACK(r16) + POP_CONTEXT_FROM_STACK(r15) + POP_CONTEXT_FROM_STACK(r14) + POP_CONTEXT_FROM_STACK(r13) + POP_CONTEXT_FROM_STACK(r12) + POP_CONTEXT_FROM_STACK(r11) + POP_CONTEXT_FROM_STACK(r10) + POP_CONTEXT_FROM_STACK(r9) + POP_CONTEXT_FROM_STACK(r8) + POP_CONTEXT_FROM_STACK(r7) + POP_CONTEXT_FROM_STACK(r6) + POP_CONTEXT_FROM_STACK(r5) + POP_CONTEXT_FROM_STACK(r4) + /* r3-retval is restored or set previously */ + POP_CONTEXT_FROM_STACK(r2) + + /* BEGIN restore carry bit */ + LOAD_FROM_STACK(TEMP_DATA_REG, ESF_OFFSET(msr)) + MASK_BITS(TEMP_DATA_REG, MSR_C_MASK) + JUMP_IF_ZERO(TEMP_DATA_REG, _isr_clear_carry, \ + DELAY_SLOT(mfs TEMP_DATA_REG, rmsr)) + +_isr_set_carry: + SET_BITS(TEMP_DATA_REG, MSR_C_MASK) + JUMP(_isr_restore_carry, \ + DELAY_SLOT(nop)) + +_isr_clear_carry: + CLEAR_BITS(TEMP_DATA_REG, MSR_C_MASK) + +_isr_restore_carry: + mts rmsr, TEMP_DATA_REG + /* END restore carry bit */ + + #if defined(CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) + /* Reload the FSR from the stack. */ + LOAD_FROM_STACK(TEMP_DATA_REG, ESF_OFFSET(fsr)) + mts rfsr, TEMP_DATA_REG + #endif + + /* r10, r14 was being used as a temporary. Now restore its true value from the stack. */ + POP_CONTEXT_FROM_STACK(TEMP_DATA_REG) + + +_isr_interrupt_exit: + /* Return using rtid so interrupts are re-enabled as this function is + * exited. Use r14(hw enforced return from interrupt address) + */ + rtid r14, 0 + /* Put the stack pointer back where it was when we entered + * exception state i.e. remove the stack frame. + */ + STACK_FREE(__struct_arch_esf_SIZEOF) diff --git a/arch/microblaze/core/microblaze_disable_dcache.S b/arch/microblaze/core/microblaze_disable_dcache.S new file mode 100644 index 000000000000..c43dfe479314 --- /dev/null +++ b/arch/microblaze/core/microblaze_disable_dcache.S @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + .text + .globl microblaze_disable_dcache + .ent microblaze_disable_dcache + .balign 4 +microblaze_disable_dcache: +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + rtsd r15, 8 + msrclr r0, 0x80 +#else /* CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 */ + addik r1, r1, -8 + mfs r11, rmsr + andi r11, r11, ~(0x80) + mts rmsr, r11 + addik r1, r1, 8 + rtsd r15, 8 + nop +#endif /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + .end microblaze_disable_dcache diff --git a/arch/microblaze/core/microblaze_disable_exceptions.S b/arch/microblaze/core/microblaze_disable_exceptions.S new file mode 100644 index 000000000000..38f3a70fda9b --- /dev/null +++ b/arch/microblaze/core/microblaze_disable_exceptions.S @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + .text + .globl microblaze_disable_exceptions + .ent microblaze_disable_exceptions + .balign 4 +microblaze_disable_exceptions: +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + rtsd r15, 8 + msrclr r0, 0x100 +#else + mfs r4, rmsr; + /* Turn OFF the EE bit */ + andi r4, r4, ~(0x100); + mts rmsr, r4; + rtsd r15, 8; + nop; +#endif +.end microblaze_disable_exceptions diff --git a/arch/microblaze/core/microblaze_disable_icache.S b/arch/microblaze/core/microblaze_disable_icache.S new file mode 100644 index 000000000000..d4c03fbb4498 --- /dev/null +++ b/arch/microblaze/core/microblaze_disable_icache.S @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + .text + .globl microblaze_disable_icache + .ent microblaze_disable_icache + .balign 4 +microblaze_disable_icache: +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + rtsd r15, 8 + msrclr r0, 0x20 +#else /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + #Read the MSR register + mfs r8, rmsr + #Clear the icache enable bit + andi r8, r8, ~(0x20) + #Save the MSR register + mts rmsr, r8 + #Return + rtsd r15, 8 + nop +#endif + .end microblaze_disable_icache diff --git a/arch/microblaze/core/microblaze_disable_interrupts.S b/arch/microblaze/core/microblaze_disable_interrupts.S new file mode 100644 index 000000000000..77d66ba120ba --- /dev/null +++ b/arch/microblaze/core/microblaze_disable_interrupts.S @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + .text + .globl microblaze_disable_interrupts + .ent microblaze_disable_interrupts + .balign 4 +microblaze_disable_interrupts: +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + rtsd r15, 8 + msrclr r0, 0x2 +#else /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + #Read the MSR register + mfs r12, rmsr + #Clear the interrupt enable bit + andi r12, r12, ~(0x2) + #Save the MSR register + mts rmsr, r12 + #Return + rtsd r15, 8 + nop +#endif /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + .end microblaze_disable_interrupts diff --git a/arch/microblaze/core/microblaze_enable_dcache.S b/arch/microblaze/core/microblaze_enable_dcache.S new file mode 100644 index 000000000000..8b77a326c024 --- /dev/null +++ b/arch/microblaze/core/microblaze_enable_dcache.S @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + .text + .globl microblaze_enable_dcache + .ent microblaze_enable_dcache + .balign 4 +microblaze_enable_dcache: + +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + rtsd r15, 8 + msrset r0, 0x80 +#else /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + #Read the MSR register + mfs r8, rmsr + #Set the interrupt enable bit + ori r8, r8, 0x80 + #Save the MSR register + mts rmsr, r8 + #Return + rtsd r15, 8 + nop +#endif /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + .end microblaze_enable_dcache diff --git a/arch/microblaze/core/microblaze_enable_exceptions.S b/arch/microblaze/core/microblaze_enable_exceptions.S new file mode 100644 index 000000000000..9d79c13431cc --- /dev/null +++ b/arch/microblaze/core/microblaze_enable_exceptions.S @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + .text + .globl microblaze_enable_exceptions + .ent microblaze_enable_exceptions + .balign 4 +microblaze_enable_exceptions: +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + rtsd r15, 8; + msrset r0, 0x100 +#else /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + mfs r4, rmsr; + /* Turn ON the EE bit */ + ori r4, r4, 0x100; + mts rmsr, r4; + rtsd r15, 8; + nop; +#endif /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ +.end microblaze_enable_exceptions diff --git a/arch/microblaze/core/microblaze_enable_icache.S b/arch/microblaze/core/microblaze_enable_icache.S new file mode 100644 index 000000000000..8ae9e3f3bc2f --- /dev/null +++ b/arch/microblaze/core/microblaze_enable_icache.S @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + .text + .globl microblaze_enable_icache + .ent microblaze_enable_icache + .balign 4 +microblaze_enable_icache: +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + rtsd r15, 8 + msrset r0, 0x20 +#else /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + #Read the MSR register + mfs r8, rmsr + #Set the interrupt enable bit + ori r8, r8, 0x20 + #Save the MSR register + mts rmsr, r8 + #Return + rtsd r15, 8 + nop +#endif /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + .end microblaze_enable_icache diff --git a/arch/microblaze/core/microblaze_enable_interrupts.S b/arch/microblaze/core/microblaze_enable_interrupts.S new file mode 100644 index 000000000000..e32ef8fb0bc0 --- /dev/null +++ b/arch/microblaze/core/microblaze_enable_interrupts.S @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + .text + .globl microblaze_enable_interrupts + .ent microblaze_enable_interrupts + .balign 4 +microblaze_enable_interrupts: +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + rtsd r15, 8 + msrset r0, 0x2 + nop +#else /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + #Read the MSR register + mfs r12, rmsr + #Set the interrupt enable bit + ori r12, r12, 0x2 + #Save the MSR register + mts rmsr, r12 + #Return + rtsd r15, 8 + nop +#endif /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + .end microblaze_enable_interrupts diff --git a/arch/microblaze/core/offsets/offsets.c b/arch/microblaze/core/offsets/offsets.c new file mode 100644 index 000000000000..001099dbbb1f --- /dev/null +++ b/arch/microblaze/core/offsets/offsets.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include +#include +#include + +GEN_OFFSET_SYM(_callee_saved_t, r1); +GEN_OFFSET_SYM(_callee_saved_t, key); +GEN_OFFSET_SYM(_callee_saved_t, retval); +GEN_OFFSET_SYM(_callee_saved_t, preempted); + +GEN_OFFSET_STRUCT(arch_esf, r31); +GEN_OFFSET_STRUCT(arch_esf, r30); +GEN_OFFSET_STRUCT(arch_esf, r29); +GEN_OFFSET_STRUCT(arch_esf, r28); +GEN_OFFSET_STRUCT(arch_esf, r27); +GEN_OFFSET_STRUCT(arch_esf, r26); +GEN_OFFSET_STRUCT(arch_esf, r25); +GEN_OFFSET_STRUCT(arch_esf, r24); +GEN_OFFSET_STRUCT(arch_esf, r23); +GEN_OFFSET_STRUCT(arch_esf, r22); +GEN_OFFSET_STRUCT(arch_esf, r21); +GEN_OFFSET_STRUCT(arch_esf, r20); +GEN_OFFSET_STRUCT(arch_esf, r19); +GEN_OFFSET_STRUCT(arch_esf, r18); +GEN_OFFSET_STRUCT(arch_esf, r17); +GEN_OFFSET_STRUCT(arch_esf, r16); +GEN_OFFSET_STRUCT(arch_esf, r15); +GEN_OFFSET_STRUCT(arch_esf, r14); +GEN_OFFSET_STRUCT(arch_esf, r13); +GEN_OFFSET_STRUCT(arch_esf, r12); +GEN_OFFSET_STRUCT(arch_esf, r11); +GEN_OFFSET_STRUCT(arch_esf, r10); +GEN_OFFSET_STRUCT(arch_esf, r11); +GEN_OFFSET_STRUCT(arch_esf, r10); +GEN_OFFSET_STRUCT(arch_esf, r9); +GEN_OFFSET_STRUCT(arch_esf, r8); +GEN_OFFSET_STRUCT(arch_esf, r7); +GEN_OFFSET_STRUCT(arch_esf, r6); +GEN_OFFSET_STRUCT(arch_esf, r5); +GEN_OFFSET_STRUCT(arch_esf, r4); +GEN_OFFSET_STRUCT(arch_esf, r3); +GEN_OFFSET_STRUCT(arch_esf, r2); +GEN_OFFSET_STRUCT(arch_esf, msr); +#if defined(CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) +GEN_OFFSET_STRUCT(arch_esf, fsr); +#endif + +GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, STACK_ROUND_UP(sizeof(struct arch_esf))); + +GEN_ABS_SYM_END diff --git a/arch/microblaze/core/prep_c.c b/arch/microblaze/core/prep_c.c new file mode 100644 index 000000000000..ad6ead485c12 --- /dev/null +++ b/arch/microblaze/core/prep_c.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#include +#include +#include +#include + +#include + +/** + * + * @brief Prepare to and run C code + * + * This routine prepares for the execution of and runs C code. + * + * @return N/A + */ + +void _PrepC(void) +{ + microblaze_disable_interrupts(); + +#if defined(CONFIG_CACHE_MANAGEMENT) +#if defined(CONFIG_ICACHE) + cache_instr_enable(); +#endif +#if defined(CONFIG_DCACHE) + cache_data_enable(); +#endif +#endif + + z_bss_zero(); + + z_cstart(); + CODE_UNREACHABLE; +} + +/** + * + * @brief Re-enable interrupts after kernel is initialised + * + * @return 0 + */ +static int interrupt_init_post_kernel(void) +{ + microblaze_enable_interrupts(); + return 0; +} + +SYS_INIT(interrupt_init_post_kernel, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); diff --git a/arch/microblaze/core/reboot.c b/arch/microblaze/core/reboot.c new file mode 100644 index 000000000000..174e2a18d060 --- /dev/null +++ b/arch/microblaze/core/reboot.c @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +/** + * @brief Reset the system + * + * This is stub function to avoid build error with CONFIG_REBOOT=y + * MicroBlaze, being a soft core cannot define a reboot routine. + * Each SoC designer should implement their own reboot if needed. 
+ */ + +void __weak sys_arch_reboot(int type) +{ + printk("__weak reboot called with %d in %s\n", type, __FILE__); + __asm__ volatile("\tbraid _start\n" + "\tmts rmsr, r0\n"); +} diff --git a/arch/microblaze/core/reset.S b/arch/microblaze/core/reset.S new file mode 100644 index 000000000000..b7a0f949e91f --- /dev/null +++ b/arch/microblaze/core/reset.S @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include + +// import +GTEXT(_start) +// export +GTEXT(__start) + + .globl _reset + .section .vectors.reset, "ax" + .balign 4 + .ent _reset + .type _reset, @function +_reset: +__start: + brai _start + .end _reset diff --git a/arch/microblaze/core/sbrk.c b/arch/microblaze/core/sbrk.c new file mode 100644 index 000000000000..e1535ee6349c --- /dev/null +++ b/arch/microblaze/core/sbrk.c @@ -0,0 +1,32 @@ + +#include +#include + +#define _DDR_NODE DT_CHOSEN(zephyr_sram) +#define _LAYOUT_DDR_LOC DT_REG_ADDR(_DDR_NODE) +#define _LAYOUT_DDR_SIZE DT_REG_SIZE(_DDR_NODE) + +/* Current offset from HEAP_BASE of unused memory */ +__attribute__((section(".bss"), used)) static size_t heap_sz; + +#define HEAP_BASE ((uintptr_t) (&_end)) +#define MAX_HEAP_SIZE (_LAYOUT_DDR_LOC + _LAYOUT_DDR_SIZE - HEAP_BASE) + +/* Implementation stolen from newlib/libc-hooks.c */ +void *_sbrk(intptr_t count) +{ + void *ret, *ptr; + + ptr = ((char *)HEAP_BASE) + heap_sz; + + if ((heap_sz + count) < MAX_HEAP_SIZE) { + heap_sz += count; + ret = ptr; + + } else { + ret = (void *)-1; + } + + return ret; +} +__weak FUNC_ALIAS(_sbrk, sbrk, void *); \ No newline at end of file diff --git a/arch/microblaze/core/swap.S b/arch/microblaze/core/swap.S new file mode 100644 index 000000000000..c3618add8a7c --- /dev/null +++ b/arch/microblaze/core/swap.S @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include +#include +#include +#include +#include + +/* exports */ +.global arch_swap + +/* imports */ +.extern _k_neg_eagain +.extern _asm_stack_failed + + +/* unsigned int arch_swap(unsigned int key) + * + * Always called with interrupts locked. + * Must always be called with a delayed branch instruction! + */ +SECTION_FUNC(exception.other, arch_swap) + + /* Make room for the context on the stack. */ + STACK_ALLOC(__struct_arch_esf_SIZEOF) + ASSERT_GT_ZERO(r1, _asm_stack_failed) + + /* Populate default return value */ + LOAD_REG_FROM_ADDR(r3, _k_neg_eagain) + + PUSH_CONTEXT_TO_STACK(r31) + PUSH_CONTEXT_TO_STACK(r30) + PUSH_CONTEXT_TO_STACK(r29) + PUSH_CONTEXT_TO_STACK(r28) + PUSH_CONTEXT_TO_STACK(r27) + PUSH_CONTEXT_TO_STACK(r26) + PUSH_CONTEXT_TO_STACK(r25) + PUSH_CONTEXT_TO_STACK(r24) + PUSH_CONTEXT_TO_STACK(r23) + PUSH_CONTEXT_TO_STACK(r22) + PUSH_CONTEXT_TO_STACK(r21) + PUSH_CONTEXT_TO_STACK(r20) + PUSH_CONTEXT_TO_STACK(r19) + PUSH_CONTEXT_TO_STACK(r18) + PUSH_CONTEXT_TO_STACK(r17) + PUSH_CONTEXT_TO_STACK(r16) + /* Stack the return address from user sub-routines */ + PUSH_CONTEXT_TO_STACK(r15) + /* The interrupts will always save the "next instruction to execute" to r14. + * This isn't the same as r15 which is a link to the calling instruction. + * Because there's a chance we may return from an ISR, we store r15+8 to r14, + * so that r14 always has the next instruction to execute. And we make both + * the swap and isr routines return to r14. 
Therefore, arch_swap should never + * be called via an undelayed branch instruction! + */ + addik r14, r15, 8 + /* Stack the return address from interrupt */ + PUSH_CONTEXT_TO_STACK(r14) + PUSH_CONTEXT_TO_STACK(r13) + PUSH_CONTEXT_TO_STACK(r12) + PUSH_CONTEXT_TO_STACK(r11) + PUSH_CONTEXT_TO_STACK(r10) + PUSH_CONTEXT_TO_STACK(r9) + PUSH_CONTEXT_TO_STACK(r8) + PUSH_CONTEXT_TO_STACK(r7) + PUSH_CONTEXT_TO_STACK(r6) + PUSH_CONTEXT_TO_STACK(r5) + PUSH_CONTEXT_TO_STACK(r4) + PUSH_CONTEXT_TO_STACK(r3) + PUSH_CONTEXT_TO_STACK(r2) + + /* Stack MSR using r11(temp1) */ + mfs TEMP_DATA_REG, rmsr + STORE_TO_STACK(TEMP_DATA_REG, ESF_OFFSET(msr)) + + #if defined(CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) + /* Stack FSR using TEMP_DATA_REG(temp1) */ + mfs TEMP_DATA_REG, rfsr + STORE_TO_STACK(TEMP_DATA_REG, ESF_OFFSET(fsr)) + #endif + +#if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) + CALL(z_thread_mark_switched_out, \ + DELAY_SLOT(nop)) + /* Need to preserve r3-retval as we're about to store it */ + POP_CONTEXT_FROM_STACK(r3) + /* Need to preserve r5-arg1 as it has the function argument. */ + POP_CONTEXT_FROM_STACK(r5) +#endif + + /* Get a reference to _kernel again in r11 (temp1) -> r11 = &_kernel */ + SET_REG(KERNEL_REF_REG, _kernel) + /* Get a reference to current thread in r12 (temp2): r12 = *(&kernel+offsetof(current)) */ + LOAD_CURRENT_THREAD(CURRENT_THREAD_REG) + /* Save the stack pointer to current thread */ + STORE_TO_CURRENT_THREAD(r1, _thread_offset_to_r1) + /* r5 has the 'key' argument which is the result of irq_lock() before this was called */ + STORE_TO_CURRENT_THREAD(r5, _thread_offset_to_key) + /* Write 0 to preempted to indicate this thread yielded from arch_swap */ + STORE_TO_CURRENT_THREAD(r0, _thread_offset_to_preempted) + /* Also store the default retval to thread */ + STORE_TO_CURRENT_THREAD(r3, _thread_offset_to_retval) + + /* Get a reference to ready_q.cache in r4 (retval1) *(&_kernel+offsetof(ready_q.cache)) */ + LOAD_NEXT_THREAD(NEXT_THREAD_REG) + /* The thread to be swapped in is now the current thread */ + WRITE_TO_KERNEL_CURRENT(NEXT_THREAD_REG) + /* Grab the stack pointer from "new" current thread */ + LOAD_FROM_NEXT_THREAD(r1, _thread_offset_to_r1) + +#if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) + /* Preserve r4 */ + STACK_ALLOC(4) + STORE_TO_STACK(NEXT_THREAD_REG, 0) + + CALL(z_thread_mark_switched_in, \ + DELAY_SLOT(nop)) + + LOAD_FROM_STACK(NEXT_THREAD_REG, 0) + STACK_FREE(4) +#endif + /* r1 (sp) now points to new thread's stack */ + +_arch_swap_restore_and_exit: + + POP_CONTEXT_FROM_STACK(r31) + POP_CONTEXT_FROM_STACK(r30) + POP_CONTEXT_FROM_STACK(r29) + POP_CONTEXT_FROM_STACK(r28) + POP_CONTEXT_FROM_STACK(r27) + POP_CONTEXT_FROM_STACK(r26) + POP_CONTEXT_FROM_STACK(r25) + POP_CONTEXT_FROM_STACK(r24) + POP_CONTEXT_FROM_STACK(r23) + POP_CONTEXT_FROM_STACK(r22) + POP_CONTEXT_FROM_STACK(r21) + POP_CONTEXT_FROM_STACK(r20) + POP_CONTEXT_FROM_STACK(r19) + POP_CONTEXT_FROM_STACK(r18) + POP_CONTEXT_FROM_STACK(r17) + POP_CONTEXT_FROM_STACK(r16) + POP_CONTEXT_FROM_STACK(r15) + POP_CONTEXT_FROM_STACK(r14) + POP_CONTEXT_FROM_STACK(r13) + POP_CONTEXT_FROM_STACK(r12) + POP_CONTEXT_FROM_STACK(r11) + POP_CONTEXT_FROM_STACK(r10) + POP_CONTEXT_FROM_STACK(r9) + POP_CONTEXT_FROM_STACK(r8) + POP_CONTEXT_FROM_STACK(r7) + POP_CONTEXT_FROM_STACK(r6) + POP_CONTEXT_FROM_STACK(r5) + /* r4 is next thread reg and will be return value for arch_swap; restoring it differently */ + POP_CONTEXT_FROM_STACK(r3) + POP_CONTEXT_FROM_STACK(r2) + + /* BEGIN restore carry bit */ + 
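        /*
         * Only the carry flag is propagated from the MSR image saved in the ESF:
         * the saved MSR is loaded from the stack, its C bit selects the set or
         * clear path, and the live MSR (read in the delay slot) is updated and
         * written back. Interrupt enable is not restored here; it is handled
         * separately below, based on the thread's saved irq-lock key.
         */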
LOAD_FROM_STACK(TEMP_DATA_REG, ESF_OFFSET(msr)) + MASK_BITS(TEMP_DATA_REG, MSR_C_MASK) + beqid TEMP_DATA_REG, _swap_clear_carry + mfs TEMP_DATA_REG, rmsr + +_swap_set_carry: + SET_BITS(TEMP_DATA_REG, MSR_C_MASK) + braid _swap_restore_carry + nop + +_swap_clear_carry: + CLEAR_BITS(TEMP_DATA_REG, MSR_C_MASK) + +_swap_restore_carry: + mts rmsr, TEMP_DATA_REG + /* END restore carry bit */ + + #if defined(CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) + /* Reload the FSR from the stack. */ + LOAD_FROM_STACK(TEMP_DATA_REG, ESF_OFFSET(fsr)) + mts rfsr, TEMP_DATA_REG + #endif + + LOAD_FROM_NEXT_THREAD(TEMP_DATA_REG, _thread_offset_to_preempted) + bneid TEMP_DATA_REG, _arch_swap_check_key // skip set retval if preempted == 1 + LOAD_FROM_NEXT_THREAD(TEMP_DATA_REG, _thread_offset_to_key) + /* Load return value into r3 (returnval1). + * -EAGAIN unless someone previously called arch_thread_return_value_set(). + * Do this before we potentially unlock interrupts. */ + LOAD_FROM_NEXT_THREAD(r3, _thread_offset_to_retval) + +_arch_swap_check_key: + /* Temp data reg has the "key" */ + beqid TEMP_DATA_REG, _arch_swap_exit // skip unlock if key == 0 + /* Restore r4 before we potentially unlock interrupts. */ + POP_CONTEXT_FROM_STACK(r4) + +_arch_swap_unlock_irq: + POP_CONTEXT_FROM_STACK(TEMP_DATA_REG) + STACK_FREE(__struct_arch_esf_SIZEOF) +/* BEGIN microblaze_enable_interrupts() */ +#if CONFIG_MICROBLAZE_USE_MSR_INSTR == 1 + /* r10 was being used as a temporary. Now restore its true value from the stack. */ + rtsd r14, 0 + msrset r0, MSR_IE_MASK +#else /*CONFIG_MICROBLAZE_USE_MSR_INSTR == 1*/ + /* r17-exception return address register should be OK to use here + * Because if we somehow manage to get an exception here, + * we probably dont plan to come back... + * Most likely exception causes: + * 1. r14 contains a poor address + * 2. We tried to write an invalid value to MSR + */ + mfs r17, rmsr + ori r17, r17, MSR_IE_MASK + rtsd r14, 0 + mts rmsr, r17 +#endif +/* END microblaze_enable_interrupts() */ + +_arch_swap_exit: + /* r10 was being used as a temporary. Now restore its true value from the stack. */ + POP_CONTEXT_FROM_STACK(TEMP_DATA_REG) + rtsd r14, 0 + STACK_FREE(__struct_arch_esf_SIZEOF) diff --git a/arch/microblaze/core/thread.c b/arch/microblaze/core/thread.c new file mode 100644 index 000000000000..f31acdce0ef6 --- /dev/null +++ b/arch/microblaze/core/thread.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD)
+ * Copyright (c) 2023 Alp Sayin
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+
+#include
+
+void z_thread_entry(k_thread_entry_t thread, void *arg1, void *arg2, void *arg3);
+
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr,
+                     k_thread_entry_t entry, void *arg1, void *arg2, void *arg3)
+{
+        struct arch_esf *stack_init;
+
+        /* Initial stack frame for thread */
+        stack_init =
+                (struct arch_esf *)Z_STACK_PTR_ALIGN(Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr));
+
+        /* Setup the initial stack frame */
+        stack_init->r5 = (uint32_t)entry;
+        stack_init->r6 = (uint32_t)arg1;
+        stack_init->r7 = (uint32_t)arg2;
+        stack_init->r8 = (uint32_t)arg3;
+        stack_init->r14 = (uint32_t)z_thread_entry;
+
+        /* Initialise the thread's stack pointer to stack_init */
+        thread->callee_saved.r1 = (uint32_t)stack_init;
+        /* Threads start with irq_unlocked */
+        thread->callee_saved.key = 1;
+        /* and return value set to default */
+        thread->callee_saved.retval = -EAGAIN;
+}
+
+/**
+ * @brief Safe stub that can be used as a thread_abort implementation
+ *
+ * @return This function does not return.
+ */
+FUNC_NORETURN void arch_thread_sleep_forever(void)
+{
+        k_sleep(K_FOREVER);
+        CODE_UNREACHABLE;
+}
diff --git a/arch/microblaze/include/kernel_arch_data.h b/arch/microblaze/include/kernel_arch_data.h
new file mode 100644
index 000000000000..a3dfd2467f25
--- /dev/null
+++ b/arch/microblaze/include/kernel_arch_data.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD)
+ * Copyright (c) 2023 Alp Sayin
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+
+
+/**
+ * @file
+ * @brief Private kernel definitions
+ *
+ * This file contains private kernel structures definitions and various
+ * other definitions for the MicroBlaze processor architecture.
+ */
+
+#ifndef ZEPHYR_ARCH_MICROBLAZE_INCLUDE_KERNEL_ARCH_DATA_H_
+#define ZEPHYR_ARCH_MICROBLAZE_INCLUDE_KERNEL_ARCH_DATA_H_
+
+#include
+#include
+
+#ifndef _ASMLANGUAGE
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ASMLANGUAGE */
+
+#endif /* ZEPHYR_ARCH_MICROBLAZE_INCLUDE_KERNEL_ARCH_DATA_H_ */
diff --git a/arch/microblaze/include/kernel_arch_func.h b/arch/microblaze/include/kernel_arch_func.h
new file mode 100644
index 000000000000..2f534de76d52
--- /dev/null
+++ b/arch/microblaze/include/kernel_arch_func.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD)
+ * Copyright (c) 2023 Alp Sayin
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+
+/**
+ * @file
+ * @brief Private kernel definitions
+ *
+ * This file contains private kernel function/macro definitions and various
+ * other definitions for the MicroBlaze processor architecture.
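For context on how the initial frame built by arch_new_thread() in thread.c above is consumed: a plain k_thread_create() call is enough to exercise it, since z_thread_entry() later unpacks the entry function staged in r5 and the three arguments staged in r6-r8. A minimal usage sketch, with illustrative stack size, priority and names:

#include <zephyr/kernel.h>

#define DEMO_STACK_SIZE 1024
#define DEMO_PRIORITY 5

K_THREAD_STACK_DEFINE(demo_stack, DEMO_STACK_SIZE);
static struct k_thread demo_thread;

static void demo_entry(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        /* Runs on the stack frame prepared by arch_new_thread(). */
        printk("hello from %p\n", k_current_get());
}

void demo_start(void)
{
        k_thread_create(&demo_thread, demo_stack, K_THREAD_STACK_SIZEOF(demo_stack),
                        demo_entry, NULL, NULL, NULL, DEMO_PRIORITY, 0, K_NO_WAIT);
}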
+ */ + +#ifndef ZEPHYR_ARCH_MICROBLAZE_INCLUDE_KERNEL_ARCH_FUNC_H_ +#define ZEPHYR_ARCH_MICROBLAZE_INCLUDE_KERNEL_ARCH_FUNC_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _ASMLANGUAGE +static ALWAYS_INLINE void arch_kernel_init(void) +{ +} + +static ALWAYS_INLINE void arch_thread_return_value_set(struct k_thread *thread, unsigned int value) +{ + thread->callee_saved.retval = value; +} + +FUNC_NORETURN void z_microblaze_fatal_error(unsigned int reason, const struct arch_esf *esf); + +static inline bool arch_is_in_isr(void) +{ + return _kernel.cpus[0].nested != 0U; +} + +#ifdef CONFIG_IRQ_OFFLOAD +void z_irq_do_offload(void); +#endif + +#endif /* _ASMLANGUAGE */ + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_ARCH_MICROBLAZE_INCLUDE_KERNEL_ARCH_FUNC_H_ */ diff --git a/arch/microblaze/include/microblaze/emulate_isr.h b/arch/microblaze/include/microblaze/emulate_isr.h new file mode 100644 index 000000000000..c74c1504e48d --- /dev/null +++ b/arch/microblaze/include/microblaze/emulate_isr.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#ifndef ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_EMULATE_ISR_H_ +#define ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_EMULATE_ISR_H_ + +#include + +#include + +extern void microblaze_emulate_isr(void); + +#define EMULATE_ISR() __asm__ volatile("\tbralid r14, microblaze_emulate_isr\n\tnop\n") + +#define EMULATE_IRQ(irq) \ + do { \ + microblaze_disable_interrupts(); \ + arch_irq_set_emulated_pending(irq); \ + __asm__ volatile("\tbralid r14, microblaze_emulate_isr\n\tnop\n"); \ + } while (0) + +#endif /* ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_EMULATE_ISR_H_ */ diff --git a/arch/microblaze/include/microblaze/mb_interface.h b/arch/microblaze/include/microblaze/mb_interface.h new file mode 100644 index 000000000000..9c43af3ecbff --- /dev/null +++ b/arch/microblaze/include/microblaze/mb_interface.h @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#ifndef ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MB_INTERFACE_H_ +#define ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MB_INTERFACE_H_ + +#ifdef __cplusplus +extern "C" { +#endif +/* Enable Interrupts */ +extern void microblaze_enable_interrupts(void); +/* Disable Interrupts */ +extern void microblaze_disable_interrupts(void); +/* Enable Instruction Cache */ +extern void microblaze_enable_icache(void); +/* Disable Instruction Cache */ +extern void microblaze_disable_icache(void); +/* Enable Instruction Cache */ +extern void microblaze_enable_dcache(void); +/* Disable Instruction Cache */ +extern void microblaze_disable_dcache(void); +/* Enable hardware exceptions */ +extern void microblaze_enable_exceptions(void); +/* Disable hardware exceptions */ +extern void microblaze_disable_exceptions(void); + +/* necessary for pre-processor */ +#define stringify(s) tostring(s) +#define tostring(s) #s + +/* Simplified Cache instruction macros that use only single register */ +#define wdc(v) ({ __asm__ __volatile__("wdc\t%0,r0\n" ::"d"(v)); }) +#define wdc_flush(v) ({ __asm__ __volatile__("wdc.flush\t%0,r0\n" ::"d"(v)); }) +#define wdc_clear(v) ({ __asm__ __volatile__("wdc.clear\t%0,r0\n" ::"d"(v)); }) +#define wic(v) ({ __asm__ __volatile__("wic\t%0,r0\n" ::"d"(v)); }) + +/* FSL Access Macros */ +/* Blocking Data Read and Write to FSL no. 
id */ +#define getfsl(val, id) asm volatile("get\t%0,rfsl" stringify(id) : "=d"(val)) +#define putfsl(val, id) asm volatile("put\t%0,rfsl" stringify(id)::"d"(val)) + +/* Non-blocking Data Read and Write to FSL no. id */ +#define ngetfsl(val, id) asm volatile("nget\t%0,rfsl" stringify(id) : "=d"(val)) +#define nputfsl(val, id) asm volatile("nput\t%0,rfsl" stringify(id)::"d"(val)) + +/* Blocking Control Read and Write to FSL no. id */ +#define cgetfsl(val, id) asm volatile("cget\t%0,rfsl" stringify(id) : "=d"(val)) +#define cputfsl(val, id) asm volatile("cput\t%0,rfsl" stringify(id)::"d"(val)) + +/* Non-blocking Control Read and Write to FSL no. id */ +#define ncgetfsl(val, id) asm volatile("ncget\t%0,rfsl" stringify(id) : "=d"(val)) +#define ncputfsl(val, id) asm volatile("ncput\t%0,rfsl" stringify(id)::"d"(val)) + +/* Polling versions of FSL access macros. This makes the FSL access interruptible */ +#define getfsl_interruptible(val, id) \ + asm volatile("\n1:\n\tnget\t%0,rfsl" stringify(id) "\n\t" \ + "addic\tr18,r0,0\n\t" \ + "bnei\tr18,1b\n" \ + : "=d"(val)::"r18") + +#define putfsl_interruptible(val, id) \ + asm volatile("\n1:\n\tnput\t%0,rfsl" stringify(id) "\n\t" \ + "addic\tr18,r0,0\n\t" \ + "bnei\tr18,1b\n" ::"d"(val) \ + : "r18") + +#define cgetfsl_interruptible(val, id) \ + asm volatile("\n1:\n\tncget\t%0,rfsl" stringify(id) "\n\t" \ + "addic\tr18,r0,0\n\t" \ + "bnei\tr18,1b\n" \ + : "=d"(val)::"r18") + +#define cputfsl_interruptible(val, id) \ + asm volatile("\n1:\n\tncput\t%0,rfsl" stringify(id) "\n\t" \ + "addic\tr18,r0,0\n\t" \ + "bnei\tr18,1b\n" ::"d"(val) \ + : "r18") +/* FSL valid and error check macros. */ +#define fsl_isinvalid(result) asm volatile("addic\t%0,r0,0" : "=d"(result)) +#define fsl_iserror(error) \ + asm volatile("mfs\t%0,rmsr\n\t" \ + "andi\t%0,%0,0x10" \ + : "=d"(error)) + +/* Pseudo assembler instructions */ +#define clz(v) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("clz\t%0,%1\n" : "=d"(_rval) : "d"(v)); \ + _rval; \ + }) + +#define mbar(mask) ({ __asm__ __volatile__("mbar\t" stringify(mask)); }) + +/* Arm like macros for calling different types of memory barriers */ +#define isb() mbar(0b01) +#define dmb() mbar(0b10) +#define dsb() mbar(0b11) +#define __ISB() isb() +#define __DMB() dmb() +#define __DSB() dsb() + +#define mb_sleep() ({ __asm__ __volatile__("sleep\t"); }) +#define mb_hibernate() ({ __asm__ __volatile__("hibernate\t"); }) +#define mb_suspend() ({ __asm__ __volatile__("suspend\t"); }) +#define mb_swapb(v) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("swapb\t%0,%1\n" : "=d"(_rval) : "d"(v)); \ + _rval; \ + }) + +#define mb_swaph(v) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("swaph\t%0,%1\n" : "=d"(_rval) : "d"(v)); \ + _rval; \ + }) + +#define mfgpr(rn) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("or\t%0,r0," stringify(rn) "\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfmsr() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rmsr\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfear() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rear\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfeare() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfse\t%0,rear\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfesr() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,resr\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mffsr() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rfsr\n" : 
"=d"(_rval)); \ + _rval; \ + }) + +#define mfpvr(rn) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rpvr" stringify(rn) "\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfpvre(rn) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfse\t%0,rpvr" stringify(rn) "\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfbtr() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rbtr\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfedr() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,redr\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfpid() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rpid\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfzpr() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rzpr\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mftlbx() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rtlbx\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mftlblo() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rtlblo\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mftlbhi() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rtlbhi\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfslr() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rslr\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mfshr() \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("mfs\t%0,rshr\n" : "=d"(_rval)); \ + _rval; \ + }) + +#define mtgpr(rn, v) ({ __asm__ __volatile__("or\t" stringify(rn) ",r0,%0\n" ::"d"(v)); }) + +#define mtmsr(v) ({ __asm__ __volatile__("mts\trmsr,%0\n\tnop\n" ::"d"(v)); }) + +#define mtfsr(v) ({ __asm__ __volatile__("mts\trfsr,%0\n\tnop\n" ::"d"(v)); }) + +#define mtpid(v) ({ __asm__ __volatile__("mts\trpid,%0\n\tnop\n" ::"d"(v)); }) + +#define mtzpr(v) ({ __asm__ __volatile__("mts\trzpr,%0\n\tnop\n" ::"d"(v)); }) + +#define mttlbx(v) ({ __asm__ __volatile__("mts\trtlbx,%0\n\tnop\n" ::"d"(v)); }) + +#define mttlblo(v) ({ __asm__ __volatile__("mts\trtlblo,%0\n\tnop\n" ::"d"(v)); }) + +#define mttlbhi(v) ({ __asm__ __volatile__("mts\trtlbhi,%0\n\tnop\n" ::"d"(v)); }) + +#define mttlbsx(v) ({ __asm__ __volatile__("mts\trtlbsx,%0\n\tnop\n" ::"d"(v)); }) + +#define mtslr(v) ({ __asm__ __volatile__("mts\trslr,%0\n\tnop\n" ::"d"(v)); }) + +#define mtshr(v) ({ __asm__ __volatile__("mts\trshr,%0\n\tnop\n" ::"d"(v)); }) + +#define lwx(address) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("lwx\t%0,%1,r0\n" : "=d"(_rval) : "d"(address)); \ + _rval; \ + }) + +#define lwr(address) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("lwr\t%0,%1,r0\n" : "=d"(_rval) : "d"(address)); \ + _rval; \ + }) + +#define lwea(lladdr) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("lwea\t%0,%M1,%L1\n" : "=d"(_rval) : "d"(lladdr)); \ + _rval; \ + }) + +#define lhur(address) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("lhur\t%0,%1,r0\n" : "=d"(_rval) : "d"(address)); \ + _rval; \ + }) + +#define lhuea(lladdr) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("lhuea\t%0,%M1,%L1\n" : "=d"(_rval) : "d"(lladdr)); \ + _rval; \ + }) + +#define lbur(address) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("lbur\t%0,%1,r0\n" : "=d"(_rval) : "d"(address)); \ + _rval; \ + }) + +#define lbuea(lladdr) \ + ({ \ + unsigned int _rval = 0U; \ + __asm__ __volatile__("lbuea\t%0,%M1,%L1\n" : "=d"(_rval) : "d"(lladdr)); \ + _rval; \ + }) + +#define swx(address, 
data) ({ __asm__ __volatile__("swx\t%0,%1,r0\n" ::"d"(data), "d"(address)); }) + +#define swr(address, data) ({ __asm__ __volatile__("swr\t%0,%1,r0\n" ::"d"(data), "d"(address)); }) + +#define swea(lladdr, data) \ + ({ __asm__ __volatile__("swea\t%0,%M1,%L1\n" ::"d"(data), "d"(lladdr)); }) + +#define shr(address, data) ({ __asm__ __volatile__("shr\t%0,%1,r0\n" ::"d"(data), "d"(address)); }) + +#define shea(lladdr, data) \ + ({ __asm__ __volatile__("shea\t%0,%M1,%L1\n" ::"d"(data), "d"(lladdr)); }) + +#define sbr(address, data) ({ __asm__ __volatile__("sbr\t%0,%1,r0\n" ::"d"(data), "d"(address)); }) + +#define sbea(lladdr, data) \ + ({ __asm__ __volatile__("sbea\t%0,%M1,%L1\n" ::"d"(data), "d"(lladdr)); }) + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MB_INTERFACE_H_ */ diff --git a/arch/microblaze/include/microblaze/microblaze_asm.h b/arch/microblaze/include/microblaze/microblaze_asm.h new file mode 100644 index 000000000000..0fff0e6ef2e7 --- /dev/null +++ b/arch/microblaze/include/microblaze/microblaze_asm.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#ifndef ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MICROBLAZE_ASM_H_ +#define ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MICROBLAZE_ASM_H_ + +#define KERNEL_REF_REG r11 +#define CURRENT_THREAD_REG r12 +#define NEXT_THREAD_REG r4 +#define TEMP_DATA_REG r10 +#define ADD_IMM(rx, imm) addik rx, rx, imm +#define SUB_IMM(rx, imm) addik rx, rx, -(imm) +#define SET_BITS(rd, mask) ori rd, rd, mask +#define CLEAR_BITS(rd, mask) andi rd, rd, ~(mask) +#define MASK_BITS(rd, mask) andi rd, rd, mask +#define COPY_REG(rd, rx) ori rd, rx, 0 +#define SET_REG(rd, imm) ori rd, r0, imm +#define STORE(rx, rd, imm) swi rx, rd, imm +#define LOAD(rx, rd, imm) lwi rx, rd, imm +#define STORE_REG_TO_ADDR(rx, imm) STORE(rx, r0, imm) +#define LOAD_REG_FROM_ADDR(rx, imm) LOAD(rx, r0, imm) +#define STORE_TO_STACK(rx, imm) STORE(rx, r1, imm) +#define PUSH_CONTEXT_TO_STACK(rx) STORE_TO_STACK(rx, ESF_OFFSET(rx)) +#define POP_CONTEXT_FROM_STACK(rx) LOAD_FROM_STACK(rx, ESF_OFFSET(rx)) +#define STACK_ALLOC(imm) SUB_IMM(r1, imm) +#define STACK_FREE(imm) ADD_IMM(r1, imm) +#define LOAD_FROM_STACK(rx, imm) LOAD(rx, r1, imm) +#define LOAD_FROM_KERNEL(rd, offset) LOAD(rd, KERNEL_REF_REG, offset) +#define SWITCH_TO_IRQ_STACK(rx) LOAD_FROM_KERNEL(rx, _kernel_offset_to_irq_stack) +#define LOAD_CURRENT_THREAD(rx) LOAD_FROM_KERNEL(rx, _kernel_offset_to_current) +#define LOAD_NEXT_THREAD(rx) LOAD_FROM_KERNEL(rx, _kernel_offset_to_ready_q_cache) +#define WRITE_TO_KERNEL_CURRENT(rx) STORE(rx, KERNEL_REF_REG, _kernel_offset_to_current) +#define STORE_TO_CURRENT_THREAD(rx, offset) STORE(rx, CURRENT_THREAD_REG, offset) +#define LOAD_FROM_CURRENT_THREAD(rx, offset) LOAD(rx, CURRENT_THREAD_REG, offset) +#define LOAD_FROM_NEXT_THREAD(rx, offset) LOAD(rx, NEXT_THREAD_REG, offset) +#define DELAY_SLOT(instr, ...) 
instr __VA_ARGS__ +#define JUMP(target, dslot) \ + brid target; \ + dslot; +#define CALL(target, dslot) \ + brlid r15, target; \ + dslot; +#define JUMP_IF_ZERO(rx, target, dslot) \ + beqid rx, target; \ + dslot; +#define JUMP_IF_NONZERO(rx, target, dslot) \ + bneid rx, target; \ + dslot; +/* "assert" macro is written for checking stack overflows; not advised to use for other purposes */ +#define ASSERT_GT_ZERO(rx, target) \ + bgti rx, 4 * (5 + 1); \ + mfs r17, rmsr; \ + ori r17, r17, MSR_EIP_MASK; \ + mts rmsr, r17; \ + bralid r17, target; \ + nop; + +#endif /* ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MICROBLAZE_ASM_H_ */ diff --git a/arch/microblaze/include/microblaze/microblaze_regs.h b/arch/microblaze/include/microblaze/microblaze_regs.h new file mode 100644 index 000000000000..46c02143e005 --- /dev/null +++ b/arch/microblaze/include/microblaze/microblaze_regs.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#ifndef _ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MICROBLAZE_REGS_H_ +#define _ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MICROBLAZE_REGS_H_ + +#define CAUSE_EXP_MASK 0x0000001f +#define CAUSE_EXP_SHIFT 0 + +#define MSR_IE_BIT (31 - 30) +#define MSR_IE_MASK (1 << MSR_IE_BIT) +#define MSR_EE_BIT (31 - 23) +#define MSR_EE_MASK (1 << MSR_EE_BIT) +#define MSR_C_BIT (31 - 29) +#define MSR_C_MASK (1 << MSR_C_BIT) +#define MSR_EIP_BIT (31 - 22) +#define MSR_EIP_MASK (1 << MSR_EIP_BIT) +#define MSR_BIP_BIT (31 - 28) +#define MSR_BIP_MASK (1 << MSR_BIP_BIT) + +#define ESF_OFFSET(rx) __struct_arch_esf_##rx##_OFFSET + +#define XIL_EXCEPTION_ID_FIRST 0U +#define XIL_EXCEPTION_ID_FSL 0U +#define XIL_EXCEPTION_ID_UNALIGNED_ACCESS 1U +#define XIL_EXCEPTION_ID_ILLEGAL_OPCODE 2U +#define XIL_EXCEPTION_ID_M_AXI_I_EXCEPTION 3U +#define XIL_EXCEPTION_ID_IPLB_EXCEPTION 3U +#define XIL_EXCEPTION_ID_M_AXI_D_EXCEPTION 4U +#define XIL_EXCEPTION_ID_DPLB_EXCEPTION 4U +#define XIL_EXCEPTION_ID_DIV_BY_ZERO 5U +#define XIL_EXCEPTION_ID_FPU 6U +#define XIL_EXCEPTION_ID_STACK_VIOLATION 7U +#define XIL_EXCEPTION_ID_MMU 7U +#define XIL_EXCEPTION_ID_LAST XIL_EXCEPTION_ID_MMU + +#endif /* _ZEPHYR_ARCH_MICROBLAZE_INCLUDE_MICROBLAZE_MICROBLAZE_REGS_H_ */ diff --git a/arch/microblaze/include/offsets_short_arch.h b/arch/microblaze/include/offsets_short_arch.h new file mode 100644 index 000000000000..ebb1a4e8faa4 --- /dev/null +++ b/arch/microblaze/include/offsets_short_arch.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#ifndef ZEPHYR_ARCH_MICROBLAZE_INCLUDE_OFFSETS_SHORT_ARCH_H_ +#define ZEPHYR_ARCH_MICROBLAZE_INCLUDE_OFFSETS_SHORT_ARCH_H_ + +#include + +#define _thread_offset_to_r1 (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r1_OFFSET) + +#define _thread_offset_to_key (___thread_t_callee_saved_OFFSET + ___callee_saved_t_key_OFFSET) + +#define _thread_offset_to_retval (___thread_t_callee_saved_OFFSET + ___callee_saved_t_retval_OFFSET) + +#define _thread_offset_to_preempted \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_preempted_OFFSET) + +#endif /* ZEPHYR_ARCH_MICROBLAZE_INCLUDE_OFFSETS_SHORT_ARCH_H_ */ diff --git a/boards/qemu/microblaze/Kconfig b/boards/qemu/microblaze/Kconfig new file mode 100644 index 000000000000..d89bec3fe675 --- /dev/null +++ b/boards/qemu/microblaze/Kconfig @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +config BOARD_QEMU_MICROBLAZE + select QEMU_TARGET diff --git a/boards/qemu/microblaze/Kconfig.defconfig b/boards/qemu/microblaze/Kconfig.defconfig new file mode 100644 index 000000000000..50e4512a5b78 --- /dev/null +++ b/boards/qemu/microblaze/Kconfig.defconfig @@ -0,0 +1,11 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +if BOARD_QEMU_MICROBLAZE + +config BUILD_OUTPUT_BIN + default n + +endif # BOARD_QEMU_MICROBLAZE diff --git a/boards/qemu/microblaze/Kconfig.qemu_microblaze b/boards/qemu/microblaze/Kconfig.qemu_microblaze new file mode 100644 index 000000000000..829170752801 --- /dev/null +++ b/boards/qemu/microblaze/Kconfig.qemu_microblaze @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +config BOARD_QEMU_MICROBLAZE + select SOC_XLNX_MICROBLAZE_DEMO diff --git a/boards/qemu/microblaze/board-qemu-microblaze-demo.dtb b/boards/qemu/microblaze/board-qemu-microblaze-demo.dtb new file mode 100644 index 000000000000..b64b4b916127 Binary files /dev/null and b/boards/qemu/microblaze/board-qemu-microblaze-demo.dtb differ diff --git a/boards/qemu/microblaze/board.cmake b/boards/qemu/microblaze/board.cmake new file mode 100644 index 000000000000..a7590100f9e3 --- /dev/null +++ b/boards/qemu/microblaze/board.cmake @@ -0,0 +1,37 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +set(SUPPORTED_EMU_PLATFORMS qemu) +set(QEMU_ARCH xilinx-microblazeel) + +set(QEMU_CPU_TYPE_${ARCH} microblaze) + +set(QEMU_FLAGS_${ARCH} + -machine microblaze-fdt + -nographic + -hw-dtb ${ZEPHYR_BASE}/boards/qemu/${ARCH}/board-qemu-microblaze-demo.dtb + # TODO: introduce a feature flags for debug flags + # -D /scratch/esnap_asayin/mb.log # if you enable anything from below, you probably want this too. + # -d int # show interrupts/exceptions in short format + # -d in_asm # show target assembly code for each compiled TB + # -d exec # show trace before each executed TB (lots of logs) + # --trace "memory_region_ops_*" + # --trace "exec_tb*" +) + +set(QEMU_KERNEL_OPTION + -kernel \$ +) + +board_set_debugger_ifnset(qemu) + +add_custom_target(debug_qemu + COMMAND + ${CROSS_COMPILE}gdb + -ex \"target extended-remote 127.0.0.1:${DEBUGSERVER_LISTEN_PORT}\" + -ex \"set disassemble-next-line on\" + ${APPLICATION_BINARY_DIR}/zephyr/${KERNEL_ELF_NAME} + USES_TERMINAL +) diff --git a/boards/qemu/microblaze/board.yml b/boards/qemu/microblaze/board.yml new file mode 100644 index 000000000000..f4059dd04053 --- /dev/null +++ b/boards/qemu/microblaze/board.yml @@ -0,0 +1,5 @@ +board: + name: qemu_microblaze + vendor: xlnx + socs: + - name: microblaze_demo diff --git a/boards/qemu/microblaze/doc/index.rst b/boards/qemu/microblaze/doc/index.rst new file mode 100644 index 000000000000..662619065040 --- /dev/null +++ b/boards/qemu/microblaze/doc/index.rst @@ -0,0 +1,55 @@ +.. _qemu_microblaze: + +Microblaze Emulation (QEMU) +########################### + +Overview +******** + +The Microblaze QEMU board configuration is used to emulate the Microblaze architecture. +The Microblaze QEMU machine instantiates its peripherals using a Devicetree Blob (DTB) +file located at `boards/microblaze/qemu_microblaze/board-qemu-microblaze-demo.dtb`. 
+This file has been produced by compiling the QEMU system devicetree inside +`boards/microblaze/qemu_microblaze/hw-dtb`. This directory also includes a Makefile +and a README explaining the compilation process to produce the DTB file. +For the applications to work properly, Zephyr devicetree and QEMU system devicetree must match. + +Programming and Debugging +************************* + +Applications for the ``qemu_microblaze`` board configuration can be built and run in +the usual way for emulated boards (see :ref:`build_an_application` and +:ref:`application_run` for more details). + +Flashing +======== + +While this board is emulated and you can't "flash" it, you can use this +configuration to run basic Zephyr applications and kernel tests in the QEMU +emulated environment. For example, with the :ref:`synchronization_sample`: + +.. zephyr-app-commands:: + :zephyr-app: samples/synchronization + :host-os: unix + :board: qemu_microblaze + :goals: run + +This will build an image with the synchronization sample app, boot it using +QEMU, and display the following console output: + +.. code-block:: console + + *** Booting Zephyr OS build v3.4.0-rc3-271-g230a121f6740 *** + thread_a: Hello World from cpu 0 on qemu_microblaze! + thread_b: Hello World from cpu 0 on qemu_microblaze! + thread_a: Hello World from cpu 0 on qemu_microblaze! + thread_b: Hello World from cpu 0 on qemu_microblaze! + thread_a: Hello World from cpu 0 on qemu_microblaze! + thread_b: Hello World from cpu 0 on qemu_microblaze! + +Exit QEMU by pressing :kbd:`CTRL+A` :kbd:`x`. + +Debugging +========= + +Refer to the detailed overview about :ref:`application_debugging`. diff --git a/boards/qemu/microblaze/hw-dtb/Makefile b/boards/qemu/microblaze/hw-dtb/Makefile new file mode 100644 index 000000000000..6af8f55cebdd --- /dev/null +++ b/boards/qemu/microblaze/hw-dtb/Makefile @@ -0,0 +1,54 @@ +# Copyright (c) 2016 Xilinx Inc +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 +# +# Makefile to build the device trees +# + +OUTDIR ?= ./ +GCC ?= gcc +DTC ?= dtc + +ifeq ($V,) +QUIET=@ +.SILENT: +else +QUIET= +endif + +SINGLE_ARCH_OUTDIR := $(OUTDIR)/LATEST + +DTS_FILES := $(wildcard *.dts) +DTSI_FILES := $(wildcard *.dtsi) +HEADER_FILES := $(wildcard *.dtsh) +HEADER_FILES += $(wildcard include/*.dtsh) + + +CPPFLAGS = -I. -Iinclude/ + +.PHONY: all source + +TARGETS = \ + $(patsubst %.dts,$(SINGLE_ARCH_OUTDIR)/%.$(1),$(DTS_FILES)) + +COMPILE = \ + $(QUIET)mkdir -p $(1); \ + $(GCC) -E -nostdinc ${CPPFLAGS} -x assembler-with-cpp $(3) -MD -MF $@.cd -o - $< | \ + $(DTC) -q -O $(2) -I dts -o $@ - -b 0; \ + cp $(SINGLE_ARCH_OUTDIR)/board-qemu-microblaze-demo.dtb ..; \ + rm -rf LATEST; + +all: $(call TARGETS,dtb) + +-include $(call TARGETS, cd) + +source: all $(call TARGETS,dts) + +$(SINGLE_ARCH_OUTDIR)/%.dtb: %.dts $(DTSI_FILES) $(HEADER_FILES) + $(call COMPILE,$(SINGLE_ARCH_OUTDIR),dtb) +$(SINGLE_ARCH_OUTDIR)/%.dts: %.dts $(DTSI_FILES) $(HEADER_FILES) + $(call COMPILE,$(SINGLE_ARCH_OUTDIR),dts) + +clean: + @rm -rf LATEST; diff --git a/boards/qemu/microblaze/hw-dtb/README b/boards/qemu/microblaze/hw-dtb/README new file mode 100644 index 000000000000..d7c59fce483c --- /dev/null +++ b/boards/qemu/microblaze/hw-dtb/README @@ -0,0 +1,10 @@ +This folder is used to build QEMU specific device trees for MicroBlaze architecture. +These device trees are used by the QEMU provided by Xilinx to internally generate machine models. + +To build the device trees: + 1. 
Obtain device-tree-compiler from a package manager.
 2. You must have dtc on your PATH, or set the DTC environment variable to point to a dtc executable.
 3. Run make in this directory
+
+This will overwrite board-qemu-microblaze-demo.dtb in the parent directory.
+The command should clean all temporary files.
diff --git a/boards/qemu/microblaze/hw-dtb/board-qemu-microblaze-demo.dts b/boards/qemu/microblaze/hw-dtb/board-qemu-microblaze-demo.dts
new file mode 100644
index 000000000000..f67b9c8538ad
--- /dev/null
+++ b/boards/qemu/microblaze/hw-dtb/board-qemu-microblaze-demo.dts
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD)
+ * Copyright (c) 2023 Alp Sayin
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/dts-v1/;
+
+/ {
+        cpus: cpus {
+                #address-cells = <1>;
+                #cpus = <0x1>;
+                #size-cells = <0>;
+        };
+
+        amba_root: amba_root {
+                #address-cells = <2>;
+                #size-cells = <2>;
+                #priority-cells = <1>;
+
+                amba: amba {
+                        compatible = "simple-bus";
+                };
+        };
+};
+
+#include "qemu-microblaze-demo.dtsi"
diff --git a/boards/qemu/microblaze/hw-dtb/include/microblaze/memmap.dtsh b/boards/qemu/microblaze/hw-dtb/include/microblaze/memmap.dtsh
new file mode 100644
index 000000000000..6820dcadd860
--- /dev/null
+++ b/boards/qemu/microblaze/hw-dtb/include/microblaze/memmap.dtsh
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD)
+ * Copyright (c) 2023 Alp Sayin
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#define CPU_CLK_FREQ 200000000
+#define TMR_CLK_FREQ 50000000
+
+#define MM_RAM 0x0
+#define MM_RAM_SIZE 0xF0000000
+
+#define DCACHE_SIZE 0x800
+#define ICACHE_SIZE 0x2000
+
+#define MM_UARTLITE_0 0xfe020000
+#define MM_UARTLITE_0_SIZE 0x10000
+#define MM_UARTLITE_0_INT 14
+
+#define MM_INTC 0xfe010000
+#define MM_INTC_SIZE 0x10000
+
+#define MM_TIMER_0 0xfe00c000
+#define MM_TIMER_0_SIZE 0x1000
+#define MM_TIMER_0_INT 15
+
+#define VECTORS MM_RAM
\ No newline at end of file
diff --git a/boards/qemu/microblaze/hw-dtb/qemu-microblaze-demo.dtsi b/boards/qemu/microblaze/hw-dtb/qemu-microblaze-demo.dtsi
new file mode 100644
index 000000000000..ea7a5ae08c48
--- /dev/null
+++ b/boards/qemu/microblaze/hw-dtb/qemu-microblaze-demo.dtsi
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "microblaze/memmap.dtsh" + +&amba { + amba_main: amba_main { + #address-cells = <2>; + #size-cells = <2>; + #priority-cells = <1>; + compatible = "simple-bus"; + interrupt-parent = <&intc0>; + + main_ram@MM_RAM { + compatible = "qemu:memory-region"; + reg = <0x0 MM_RAM 0x0 MM_RAM_SIZE 0x1>; + qemu,ram = <1>; + }; + + intc0: interrupt-controller@MM_INTC { + compatible = "xlnx.xps-intc"; + reg = <0x0 MM_INTC 0x0 MM_INTC_SIZE 0x1>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts-extended = <&cpu0 0>; + kind-of-intr = <0x0>; + interrupt-names = "Outputs"; + label = "intc0"; + }; + + xlnx_timer@MM_TIMER_0 { + compatible = "xlnx.xps-timer"; + reg = <0x0 MM_TIMER_0 0x0 MM_TIMER_0_SIZE 0x1>; + interrupts = ; + clock-frequency = ; + }; + + uart0@MM_UARTLITE_0 { + compatible = "xlnx,xps-uartlite"; + reg = <0x0 MM_UARTLITE_0 0x0 MM_UARTLITE_0_SIZE 0x1>; + interrupts = ; + chardev = "serial0"; + }; + }; +}; + +&cpus { + cpu0: cpu@0 { + gdb-id = "CPU0"; + doc-status = "partial"; + #interrupt-cells = <1>; + clock-frequency = ; + compatible = "xlnx,microblaze-cpu"; + d-cache-size = ; + device_type = "cpu"; + i-cache-size = ; + model = "microblaze,10.0"; + version = "10.0"; + reg = <0>; + timebase-frequency = ; + xlnx,addr-tag-bits = <0x10>; + xlnx,area-optimized = <0x0>; + xlnx,avoid-primitives = <0x3>; + xlnx,base-vectors = ; + xlnx,branch-target-cache-size = <0x0>; + xlnx,d-axi = <0x1>; + xlnx,d-lmb = <0x1>; + xlnx,d-plb = <0x0>; + xlnx,data-size = <0x20>; + xlnx,debug-enabled = <0x1>; + xlnx,div-zero-exception = <0x1>; + xlnx,dynamic-bus-sizing = <0x1>; + xlnx,ecc-use-ce-exception = <0x0>; + xlnx,edge-is-positive = <0x1>; + xlnx,family = "virtex7"; + xlnx,fault-tolerant = <0x1>; + xlnx,fpu-exception = <0x0>; + xlnx,freq = ; + xlnx,fsl-data-size = <0x20>; + xlnx,fsl-exception = <0x0>; + xlnx,fsl-links = <0x0>; + xlnx,i-axi = <0x1>; + xlnx,i-lmb = <0x1>; + xlnx,i-plb = <0x0>; + xlnx,ill-opcode-exception = <0x1>; + xlnx,instance = "microblaze_1"; + xlnx,interconnect = <0x2>; + xlnx,interrupt-is-edge = <0x0>; + xlnx,lockstep-slave = <0x0>; + xlnx,mmu-dtlb-size = <0x2>; + xlnx,mmu-itlb-size = <0x4>; + xlnx,mmu-privileged-instr = <0x0>; + xlnx,mmu-tlb-access = <0x3>; + xlnx,mmu-zones = <0x2>; + xlnx,number-of-pc-brk = <0x1>; + xlnx,number-of-rd-addr-brk = <0x1>; + xlnx,number-of-wr-addr-brk = <0x1>; + xlnx,opcode-0x0-illegal = <0x1>; + xlnx,optimization = <0x0>; + xlnx,pc-width = <0x20>; + xlnx,pvr = <0x2>; + xlnx,pvr-user1 = <0x0>; + xlnx,pvr-user2 = <0x0>; + xlnx,reset-msr = <0x0>; + xlnx,sco = <0x0>; + xlnx,stream-interconnect = <0x0>; + xlnx,unaligned-exceptions = <0x1>; + xlnx,use-barrel = <0x1>; + xlnx,use-branch-target-cache = <0x0>; + xlnx,use-dcache = <0x1>; + xlnx,use-div = <0x1>; + xlnx,use-ext-brk = <0x1>; + xlnx,use-ext-nm-brk = <0x1>; + xlnx,use-extended-fsl-instr = <0x0>; + xlnx,use-fpu = <0x1>; + xlnx,use-hw-mul = <0x2>; + xlnx,use-icache = <0x1>; + xlnx,use-interrupt = <0x1>; + xlnx,use-mmu = <0x1>; + xlnx,use-msr-instr = <0x1>; + xlnx,use-pcmp-instr = <0x1>; + xlnx,use-reorder-instr = <0x1>; + xlnx,use-stack-protection = <0x1>; + xlnx,addr-size= <52>; + memory = <&amba_main>; + }; +}; diff --git a/boards/qemu/microblaze/qemu_microblaze.dts b/boards/qemu/microblaze/qemu_microblaze.dts new file mode 100644 index 000000000000..1bd6bc810551 --- /dev/null +++ b/boards/qemu/microblaze/qemu_microblaze.dts @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +/dts-v1/; + +#include + +/ { + model = "qemu_microblaze"; + compatible = "qemu,microblaze"; + + aliases { + uart-0 = &uart0; + }; + + chosen { + zephyr,sram = &ddr0; + zephyr,console = &uart0; + zephyr,shell-uart = &uart0; + }; +}; + +&uart0 { + status = "okay"; + current-speed = <115200>; +}; diff --git a/boards/qemu/microblaze/qemu_microblaze.yaml b/boards/qemu/microblaze/qemu_microblaze.yaml new file mode 100644 index 000000000000..8383d15247cb --- /dev/null +++ b/boards/qemu/microblaze/qemu_microblaze.yaml @@ -0,0 +1,20 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + +identifier: qemu_microblaze +name: QEMU Emulation for MicroBlaze +type: qemu +simulation: qemu +arch: microblaze +toolchain: + - microblaze + - zephyr +supported: + - serial +testing: + default: true + ignore_tags: + - net + - bluetooth +vendor: qemu diff --git a/boards/qemu/microblaze/qemu_microblaze_defconfig b/boards/qemu/microblaze/qemu_microblaze_defconfig new file mode 100644 index 000000000000..9da147143bb1 --- /dev/null +++ b/boards/qemu/microblaze/qemu_microblaze_defconfig @@ -0,0 +1,16 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +CONFIG_UART_XLNX_UARTLITE=y +CONFIG_UART_INTERRUPT_DRIVEN=y +CONFIG_SERIAL=y +CONFIG_CONSOLE=y +CONFIG_UART_CONSOLE=y +CONFIG_EXTRA_EXCEPTION_INFO=y +CONFIG_QEMU_ICOUNT=n +CONFIG_XLNX_INTC_INITIALIZE_IVAR_REGISTERS=y +# TX and RX IRQs are shared. Disabling one doesn't really disable any. +# Interrupt driven shell implementation doesn't play nice with this paradigm +CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN=n diff --git a/cmake/compiler/gcc/target.cmake b/cmake/compiler/gcc/target.cmake index 7e8ffc481733..8bcfd650fb13 100644 --- a/cmake/compiler/gcc/target.cmake +++ b/cmake/compiler/gcc/target.cmake @@ -78,6 +78,8 @@ elseif("${ARCH}" STREQUAL "mips") include(${CMAKE_CURRENT_LIST_DIR}/target_mips.cmake) elseif("${ARCH}" STREQUAL "xtensa") include(${CMAKE_CURRENT_LIST_DIR}/target_xtensa.cmake) +elseif("${ARCH}" STREQUAL "microblaze") + include(${CMAKE_CURRENT_LIST_DIR}/target_microblaze.cmake) endif() if(SYSROOT_DIR) diff --git a/cmake/compiler/gcc/target_microblaze.cmake b/cmake/compiler/gcc/target_microblaze.cmake new file mode 100644 index 000000000000..d52c55f30d8b --- /dev/null +++ b/cmake/compiler/gcc/target_microblaze.cmake @@ -0,0 +1,51 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + + +list(APPEND TOOLCHAIN_C_FLAGS -mcpu=${CONFIG_CPU_VERSION}) + +if(DEFINED CONFIG_MICROBLAZE_DATA_IS_TEXT_RELATIVE) + list(APPEND TOOLCHAIN_C_FLAGS -mpic-data-is-text-relative) +endif() + +if(DEFINED CONFIG_MICROBLAZE_USE_BARREL_SHIFT_INSTR) + list(APPEND TOOLCHAIN_C_FLAGS -mxl-barrel-shift) +endif() + +if(DEFINED CONFIG_MICROBLAZE_USE_MUL_INSTR) + list(APPEND TOOLCHAIN_C_FLAGS -mno-xl-soft-mul) +else() + list(APPEND TOOLCHAIN_C_FLAGS -mxl-soft-mul) +endif() + +if(NOT DEFINED CONFIG_MICROBLAZE_USE_PATTERN_COMPARE_INSTR) + list(APPEND TOOLCHAIN_C_FLAGS -mxl-pattern-compare) +endif() + +if(DEFINED CONFIG_MICROBLAZE_USE_MULHI_INSTR) + list(APPEND TOOLCHAIN_C_FLAGS -mxl-multiply-high) +endif() + +if(DEFINED CONFIG_MICROBLAZE_USE_DIV_INSTR) + list(APPEND TOOLCHAIN_C_FLAGS -mno-xl-soft-div) +else() + list(APPEND TOOLCHAIN_C_FLAGS -mxl-soft-div) +endif() + +if(DEFINED CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) + list(APPEND TOOLCHAIN_C_FLAGS -mhard-float) +else() + list(APPEND TOOLCHAIN_C_FLAGS -msoft-float) +endif() + + +# Common options +list(APPEND TOOLCHAIN_C_FLAGS -fdollars-in-identifiers) +# TODO: Remove this when gcc microblaze variant oddity is fixed +list(APPEND TOOLCHAIN_C_FLAGS -mlittle-endian) +list(APPEND TOOLCHAIN_LD_FLAGS -mlittle-endian) + +# string(REPLACE ";" "\n " str "${TOOLCHAIN_C_FLAGS}") +# message(STATUS "Final set of C Flags: ${str}") diff --git a/drivers/interrupt_controller/CMakeLists.txt b/drivers/interrupt_controller/CMakeLists.txt index edc0d23a633f..da939f575b52 100644 --- a/drivers/interrupt_controller/CMakeLists.txt +++ b/drivers/interrupt_controller/CMakeLists.txt @@ -43,6 +43,7 @@ zephyr_library_sources_ifdef(CONFIG_NXP_PINT intc_nxp_pint.c) zephyr_library_sources_ifdef(CONFIG_RENESAS_RA_ICU intc_renesas_ra_icu.c) zephyr_library_sources_ifdef(CONFIG_NXP_IRQSTEER intc_nxp_irqsteer.c) zephyr_library_sources_ifdef(CONFIG_INTC_MTK_ADSP intc_mtk_adsp.c) +zephyr_library_sources_ifdef(CONFIG_XLNX_INTC intc_xlnx.c) if(CONFIG_INTEL_VTD_ICTL) zephyr_library_include_directories(${ZEPHYR_BASE}/arch/x86/include) diff --git a/drivers/interrupt_controller/Kconfig b/drivers/interrupt_controller/Kconfig index 250309d20833..d21136a12889 100644 --- a/drivers/interrupt_controller/Kconfig +++ b/drivers/interrupt_controller/Kconfig @@ -108,4 +108,6 @@ source "drivers/interrupt_controller/Kconfig.nxp_irqsteer" source "drivers/interrupt_controller/Kconfig.mtk_adsp" +source "drivers/interrupt_controller/Kconfig.xlnx" + endmenu diff --git a/drivers/interrupt_controller/Kconfig.xlnx b/drivers/interrupt_controller/Kconfig.xlnx new file mode 100644 index 000000000000..104d137d5e67 --- /dev/null +++ b/drivers/interrupt_controller/Kconfig.xlnx @@ -0,0 +1,49 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +if MICROBLAZE + +config XLNX_INTC + bool + help + The AXI Interrupt Controller (INTC) works with MICROBLAZE processor. + +menu "AXI Interrupt Controller Optional Registers" + depends on XLNX_INTC + + config XLNX_INTC_USE_IPR + bool "Use Interrupt Pending Register" + help + Each bit in this register is the logical AND of the bits in the ISR and the IER. + This is an optional read-only register. Don't choose to use it if it doesn't exist. + + config XLNX_INTC_USE_SIE + bool "Use Set Interrupt Enables Register" + help + Writing a 1 to a bit location in SIE sets the corresponding bit in the IER. 
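To make the SIE/CIE help text concrete: with the optional set/clear registers present, enabling or disabling a source is a single register write, whereas without them the driver must read-modify-write the IER, which is the fallback intc_xlnx.c uses. A hedged sketch, reusing the driver's register offsets with an illustrative base address:

#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/util.h>

#define INTC_BASE 0xFE010000UL /* illustrative: MM_INTC of the QEMU memmap */
#define XIN_IER_OFFSET 8       /* Interrupt Enable Register */
#define XIN_SIE_OFFSET 16      /* Set Interrupt Enables Register (optional) */

/* Fallback path: read-modify-write of the IER (not safe against concurrent writers). */
static inline void intc_enable_irq_rmw(uint32_t irq)
{
        uint32_t ier = sys_read32(INTC_BASE + XIN_IER_OFFSET);

        sys_write32(ier | BIT(irq), INTC_BASE + XIN_IER_OFFSET);
}

/* With CONFIG_XLNX_INTC_USE_SIE the same effect is a single write, no read-back needed. */
static inline void intc_enable_irq_sie(uint32_t irq)
{
        sys_write32(BIT(irq), INTC_BASE + XIN_SIE_OFFSET);
}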
+ This is an optional write-only register. Don't choose to use it if it doesn't exist. + + config XLNX_INTC_USE_CIE + bool "Use Clear Interrupt Enables Register" + help + Writing a 1 to a bit location in CIE clears the corresponding bit in the IER. + This is an optional write-only register. Don't choose to use it if it doesn't exist. + + config XLNX_INTC_USE_IVR + bool "Use Interrupt Vector Register" + help + The IVR contains the ordinal value of the highest priority, enabled, and active interrupt input. + The IVR acts as an index to the correct Interrupt Vector Address. + This is an optional read-only register. Don't choose to use it if it doesn't exist. + + config XLNX_INTC_INITIALIZE_IVAR_REGISTERS + bool "Initialize Interrupt Vector Address Registers" + help + The IVAR contains the addresses for fast-interrupts. This flag enables initializing all + the address registers to point them to default interrupt handler which is `0x10` for Microblaze. + +endmenu + +endif # MICROBLAZE diff --git a/drivers/interrupt_controller/intc_xlnx.c b/drivers/interrupt_controller/intc_xlnx.c new file mode 100644 index 000000000000..7c71ca006d3b --- /dev/null +++ b/drivers/interrupt_controller/intc_xlnx.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + * This file implements AXI Interrupt Controller (INTC) + * For more details about the INTC see PG 099 + * + * The functionality has been based on intc_v3_12 package + * + * Right now the implementation: + * - does not support fast interrupt mode + * - does not support Cascade mode + * - does not support XIN_SVC_SGL_ISR_OPTION + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DT_DRV_COMPAT xlnx_intc + +#define SOC_INTC_DEVICE_ID 0 + +#define BASE_ADDRESS DT_INST_REG_ADDR(SOC_INTC_DEVICE_ID) +#define INTC_REG(offset) (uint32_t *)(BASE_ADDRESS + offset) + +#define xlnx_intc_read(offset) sys_read32(BASE_ADDRESS + offset) +#define xlnx_intc_write(data, offset) sys_write32(data, BASE_ADDRESS + offset) + +#define XIN_SVC_SGL_ISR_OPTION 1UL +#define XIN_SVC_ALL_ISRS_OPTION 2UL + +#define XIN_ISR_OFFSET 0 /* Interrupt Status Register */ +#define XIN_IPR_OFFSET 4 /* Interrupt Pending Register */ +#define XIN_IER_OFFSET 8 /* Interrupt Enable Register */ +#define XIN_IAR_OFFSET 12 /* Interrupt Acknowledge Register */ +#define XIN_SIE_OFFSET 16 /* Set Interrupt Enable Register */ +#define XIN_CIE_OFFSET 20 /* Clear Interrupt Enable Register */ +#define XIN_IVR_OFFSET 24 /* Interrupt Vector Register */ +#define XIN_MER_OFFSET 28 /* Master Enable Register */ +#define XIN_IMR_OFFSET 32 /* Interrupt Mode Register , this is present only for Fast Interrupt */ +#define XIN_IVAR_OFFSET \ + 0x100 /* Interrupt Vector Address Register , this is present only for Fast Interrupt */ + +/* Bit definitions for the bits of the MER register */ + +#define XIN_INT_MASTER_ENABLE_MASK 0x1UL +#define XIN_INT_HARDWARE_ENABLE_MASK 0x2UL /* once set cannot be cleared */ + +#define MICROBLAZE_INTERRUPT_VECTOR_ADDRESS 0x10 /* set it to standard interrupt vector */ + +struct xlnx_intc_state { + bool is_ready; /**< Device is initialized and ready */ + bool is_started; /**< Device has been started */ +}; + +static struct xlnx_intc_state intc_state = { + .is_ready = false, + .is_started = false, +}; + +uint32_t xlnx_intc_irq_get_enabled(void) +{ + return xlnx_intc_read(XIN_IER_OFFSET); +} + +uint32_t xlnx_intc_get_status_register(void) 
+{ + return xlnx_intc_read(XIN_ISR_OFFSET); +} + +uint32_t xlnx_intc_irq_pending(void) +{ +#if defined(CONFIG_XLNX_INTC_USE_IPR) + return xlnx_intc_read(XIN_IPR_OFFSET); +#else + uint32_t enabled = xlnx_intc_irq_get_enabled(); + uint32_t interrupt_status_register = xlnx_intc_get_status_register(); + + return enabled & interrupt_status_register; +#endif +} + +uint32_t xlnx_intc_irq_pending_vector(void) +{ +#if defined(CONFIG_XLNX_INTC_USE_IVR) + return xlnx_intc_read(XIN_IVR_OFFSET); +#else + return find_lsb_set(xlnx_intc_irq_pending()) - 1; +#endif +} + +void xlnx_intc_irq_enable(uint32_t irq) +{ + __ASSERT_NO_MSG(irq < 32); + + uint32_t mask = BIT(irq); + +#if defined(CONFIG_XLNX_INTC_USE_SIE) + xlnx_intc_write(mask, XIN_SIE_OFFSET); +#else + atomic_or((atomic_t *)INTC_REG(XIN_IER_OFFSET), mask); +#endif /* CONFIG_XLNX_INTC_USE_SIE */ +} + +void xlnx_intc_irq_disable(uint32_t irq) +{ + __ASSERT_NO_MSG(irq < 32); + + uint32_t mask = BIT(irq); + +#if defined(CONFIG_XLNX_INTC_USE_CIE) + xlnx_intc_write(mask, XIN_CIE_OFFSET); +#else + atomic_and((atomic_t *)INTC_REG(XIN_IER_OFFSET), ~mask); +#endif /* CONFIG_XLNX_INTC_USE_CIE */ +} + +void xlnx_intc_irq_acknowledge(uint32_t mask) +{ + xlnx_intc_write(mask, XIN_IAR_OFFSET); +} + +int32_t xlnx_intc_controller_init(uint16_t device_id) +{ + if (intc_state.is_started == true) { + return -EEXIST; + } + + /* + * Disable IRQ output signal + * Disable all interrupt sources + * Acknowledge all sources + * Disable fast interrupt mode + */ + xlnx_intc_write(0, XIN_MER_OFFSET); + xlnx_intc_write(0, XIN_IER_OFFSET); + xlnx_intc_write(0xFFFFFFFF, XIN_IAR_OFFSET); + +#if defined(CONFIG_XLNX_INTC_INITIALIZE_IVAR_REGISTERS) + xlnx_intc_write(0, XIN_IMR_OFFSET); + + for (int idx = 0; idx < 32; idx++) { + xlnx_intc_write(0x10, XIN_IVAR_OFFSET + (idx * 4)); + } +#endif + + intc_state.is_ready = true; + + return 0; +} + +int32_t xlnx_intc_irq_start(void) +{ + if (intc_state.is_started != false) { + return -EEXIST; + } + if (intc_state.is_ready != true) { + return -ENOENT; + } + + intc_state.is_started = true; + + uint32_t enable_mask = (XIN_INT_MASTER_ENABLE_MASK | XIN_INT_HARDWARE_ENABLE_MASK); + + xlnx_intc_write(enable_mask, XIN_MER_OFFSET); + + return 0; +} + +static int xlnx_intc_interrupt_init(void) +{ + int32_t status = xlnx_intc_controller_init(SOC_INTC_DEVICE_ID); + + if (status != 0) { + return status; + } + + status = xlnx_intc_irq_start(); + + return status; +} + +SYS_INIT(xlnx_intc_interrupt_init, PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY); diff --git a/drivers/timer/CMakeLists.txt b/drivers/timer/CMakeLists.txt index 56eea917d59f..779388a51e74 100644 --- a/drivers/timer/CMakeLists.txt +++ b/drivers/timer/CMakeLists.txt @@ -34,6 +34,7 @@ zephyr_library_sources_ifdef(CONFIG_SAM0_RTC_TIMER sam0_rtc_timer.c) zephyr_library_sources_ifdef(CONFIG_STM32_LPTIM_TIMER stm32_lptim_timer.c) zephyr_library_sources_ifdef(CONFIG_TI_DM_TIMER ti_dmtimer.c) zephyr_library_sources_ifdef(CONFIG_XLNX_PSTTC_TIMER xlnx_psttc_timer.c) +zephyr_library_sources_ifdef(CONFIG_XLNX_TMRCTR xlnx_tmrctr.c) zephyr_library_sources_ifdef(CONFIG_XTENSA_TIMER xtensa_sys_timer.c) zephyr_library_sources_ifdef(CONFIG_SMARTBOND_TIMER smartbond_timer.c) zephyr_library_sources_ifdef(CONFIG_MTK_ADSP_TIMER mtk_adsp_timer.c) diff --git a/drivers/timer/Kconfig b/drivers/timer/Kconfig index e0fbfa4b1af2..faffbcb44486 100644 --- a/drivers/timer/Kconfig +++ b/drivers/timer/Kconfig @@ -94,6 +94,7 @@ source "drivers/timer/Kconfig.smartbond" source "drivers/timer/Kconfig.stm32_lptim" source 
"drivers/timer/Kconfig.ti_dm_timer" source "drivers/timer/Kconfig.xlnx_psttc" +source "drivers/timer/Kconfig.xlnx_tmrctr" source "drivers/timer/Kconfig.xtensa" source "drivers/timer/Kconfig.mtk_adsp" source "drivers/timer/Kconfig.sy1xx_sys_timer" diff --git a/drivers/timer/Kconfig.xlnx_tmrctr b/drivers/timer/Kconfig.xlnx_tmrctr new file mode 100644 index 000000000000..337c5c890ec5 --- /dev/null +++ b/drivers/timer/Kconfig.xlnx_tmrctr @@ -0,0 +1,22 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + + + +config XLNX_TMRCTR + bool "Xilinx AXI Timer/Counter" + select TICKLESS_CAPABLE + default y if MICROBLAZE + help + This module implements a kernel device driver for the MICROBLAZE AXI Timer devices. + AXI timer is not capable of matching against a preloaded value. Thus, it's not + capable of providing TICKLESS_KERNEL. + +config XLNX_TMRCTR_TIMER_INDEX + int "Xilinx TMRCTR timer index" + default 0 + depends on XLNX_TMRCTR + help + This is the index of timer/counter picked to provide system clock. diff --git a/drivers/timer/xlnx_tmrctr.c b/drivers/timer/xlnx_tmrctr.c new file mode 100644 index 000000000000..6d30ea46305e --- /dev/null +++ b/drivers/timer/xlnx_tmrctr.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include + +#include +#include +#include + +#define DT_DRV_COMPAT xlnx_tmrctr + +#define IRQ_TIMER DT_INST_IRQN(CONFIG_XLNX_TMRCTR_TIMER_INDEX) +#define TIMER_CYCLES_PER_SEC DT_INST_PROP(CONFIG_XLNX_TMRCTR_TIMER_INDEX, clock_frequency) +#define BASE_ADDRESS DT_INST_REG_ADDR(0) + +#define TICK_TIMER_COUNTER_NUMBER 0U +#define SYS_CLOCK_COUNTER_NUMBER 1U + +#define TIMER_CYCLES_PER_TICK (TIMER_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC) +#define TICK_TIMER_TOP_VALUE (TIMER_CYCLES_PER_TICK - 1UL) + +#define NUM_COUNTERS 2 + +/* Register definitions */ +#define XTC_TCSR_OFFSET 0 /**< Control/Status register */ +#define XTC_TLR_OFFSET 4 /**< Load register */ +#define XTC_TCR_OFFSET 8 /**< Timer counter register */ + +/* Control status register mask */ +#define XTC_CSR_CASC_MASK 0x00000800 +#define XTC_CSR_ENABLE_ALL_MASK 0x00000400 +#define XTC_CSR_ENABLE_PWM_MASK 0x00000200 +#define XTC_CSR_INT_OCCURRED_MASK 0x00000100 +#define XTC_CSR_ENABLE_TMR_MASK 0x00000080 +#define XTC_CSR_ENABLE_INT_MASK 0x00000040 +#define XTC_CSR_LOAD_MASK 0x00000020 +#define XTC_CSR_AUTO_RELOAD_MASK 0x00000010 +#define XTC_CSR_EXT_CAPTURE_MASK 0x00000008 +#define XTC_CSR_EXT_GENERATE_MASK 0x00000004 +#define XTC_CSR_DOWN_COUNT_MASK 0x00000002 +#define XTC_CSR_CAPTURE_MODE_MASK 0x00000001 + +/* 1st counter is at offset 0, 2nd counter is at offset 16 */ +#define NUM_REGS_PER_COUNTER 16 +#define COUNTER_REG_OFFSET(idx) (NUM_REGS_PER_COUNTER * idx) + +/* + * CYCLES_NEXT_MIN must be large enough to ensure that the timer does not miss + * interrupts. This value was conservatively set, and there is room for improvement. + */ +#define CYCLES_NEXT_MIN (TIMER_CYCLES_PER_SEC / 5000) +/* We allow only half the maximum numerical range of the cycle counters so that we + * can never miss a sysclock overflow. This is also being very conservative. 
+ */ +#define CYCLES_NEXT_MAX (0xFFFFFFFFU / 2) + +static volatile uint32_t last_cycles; + +BUILD_ASSERT(TIMER_CYCLES_PER_SEC >= CONFIG_SYS_CLOCK_TICKS_PER_SEC, + "Timer clock frequency must be greater than the system tick " + "frequency"); + +BUILD_ASSERT((TIMER_CYCLES_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) == 0, + "Timer clock frequency is not divisible by the system tick " + "frequency"); + +BUILD_ASSERT((CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC % TIMER_CYCLES_PER_SEC) == 0, + "CPU clock frequency is not divisible by the Timer clock frequency " + "frequency"); + +enum xlnx_tmrctr_state { + XLNX_TMRCTR_INIT, /* Initial (inactive) state */ + XLNX_TMRCTR_READY, /* Initialised */ + XLNX_TMRCTR_RUNNING /* Started */ +}; + +struct xlnx_tmrctr_data { + mm_reg_t base; + enum xlnx_tmrctr_state state; +}; + +struct xlnx_tmrctr_data xlnx_tmrctr = { + .base = BASE_ADDRESS, + .state = XLNX_TMRCTR_INIT, +}; + +#define xlnx_tmrctr_read32(counter_number, offset) \ + sys_read32(BASE_ADDRESS + COUNTER_REG_OFFSET(counter_number) + offset) + +#define xlnx_tmrctr_write32(counter_number, value, offset) \ + sys_write32(value, BASE_ADDRESS + COUNTER_REG_OFFSET(counter_number) + offset) + +volatile uint32_t xlnx_tmrctr_read_count(void) +{ + return xlnx_tmrctr_read32(SYS_CLOCK_COUNTER_NUMBER, XTC_TCR_OFFSET); +} + +volatile uint32_t xlnx_tmrctr_read_hw_cycle_count(void) +{ + return (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / TIMER_CYCLES_PER_SEC) * + xlnx_tmrctr_read_count(); +} + +static void xlnx_tmrctr_clear_interrupt(void) +{ + uint32_t control_status_register = + xlnx_tmrctr_read32(TICK_TIMER_COUNTER_NUMBER, XTC_TCSR_OFFSET); + + xlnx_tmrctr_write32(TICK_TIMER_COUNTER_NUMBER, + control_status_register | XTC_CSR_INT_OCCURRED_MASK, XTC_TCSR_OFFSET); +} + +static inline void xlnx_tmrctr_set_reset_value(uint8_t counter_number, uint32_t reset_value) +{ + xlnx_tmrctr_write32(counter_number, reset_value, XTC_TLR_OFFSET); +} + +static inline void xlnx_tmrctr_set_options(uint8_t counter_number, uint32_t options) +{ + xlnx_tmrctr_write32(counter_number, options, XTC_TCSR_OFFSET); +} + +#ifdef CONFIG_TICKLESS_KERNEL +static void xlnx_tmrctr_reload_tick_timer(uint32_t delta_cycles) +{ + uint32_t csr_val; + uint32_t cur_cycle_count = xlnx_tmrctr_read_count(); + + /* Ensure that the delta_cycles value meets the timing requirements */ + if (delta_cycles < CYCLES_NEXT_MIN) { + /* Don't risk missing an interrupt */ + delta_cycles = CYCLES_NEXT_MIN; + } + if (delta_cycles > CYCLES_NEXT_MAX - cur_cycle_count) { + /* Don't risk missing a sysclock overflow */ + delta_cycles = CYCLES_NEXT_MAX - cur_cycle_count; + } + + /* Write counter load value for interrupt generation */ + xlnx_tmrctr_set_reset_value(TICK_TIMER_COUNTER_NUMBER, delta_cycles); + + /* Load the load value */ + csr_val = xlnx_tmrctr_read32(TICK_TIMER_COUNTER_NUMBER, XTC_TCSR_OFFSET); + xlnx_tmrctr_write32(TICK_TIMER_COUNTER_NUMBER, csr_val | XTC_CSR_LOAD_MASK, + XTC_TCSR_OFFSET); + xlnx_tmrctr_write32(TICK_TIMER_COUNTER_NUMBER, csr_val, XTC_TCSR_OFFSET); +} +#endif /* CONFIG_TICKLESS_KERNEL */ + +static void xlnx_tmrctr_irq_handler(const void *unused) +{ + uint32_t cycles; + uint32_t delta_ticks; + + ARG_UNUSED(unused); + + cycles = xlnx_tmrctr_read_count(); + /* Calculate the number of ticks since last announcement */ + delta_ticks = (cycles - last_cycles) / TIMER_CYCLES_PER_TICK; + /* Update last cycles count without the rounding error */ + last_cycles += (delta_ticks * TIMER_CYCLES_PER_TICK); + + /* Announce to the kernel*/ + sys_clock_announce(delta_ticks); + + 
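        /*
         * Worked example of the accounting above, assuming the 50 MHz timer
         * clock of the QEMU demo platform and an illustrative
         * CONFIG_SYS_CLOCK_TICKS_PER_SEC of 100: TIMER_CYCLES_PER_TICK is then
         * 500000, so a delta of 1234567 cycles announces 2 ticks and advances
         * last_cycles by exactly 1000000, carrying the remaining 234567 cycles
         * into the next announcement instead of losing them to rounding.
         */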
+void sys_clock_set_timeout(int32_t ticks, bool idle) +{ +#ifdef CONFIG_TICKLESS_KERNEL + uint32_t cycles; + uint32_t delta_cycles; + + /* Read counter value */ + cycles = xlnx_tmrctr_read_count(); + + /* Calculate timeout counter value */ + if (ticks == K_TICKS_FOREVER) { + delta_cycles = CYCLES_NEXT_MAX; + } else { + delta_cycles = ((uint32_t)ticks * TIMER_CYCLES_PER_TICK); + } + + /* Set timer reload value for the next interrupt */ + xlnx_tmrctr_reload_tick_timer(delta_cycles); +#endif +} + +uint32_t sys_clock_elapsed(void) +{ +#ifdef CONFIG_TICKLESS_KERNEL + uint32_t cycles = xlnx_tmrctr_read_count(); + + return (cycles - last_cycles) / TIMER_CYCLES_PER_TICK; +#else + /* Always return 0 for tickful operation */ + return 0; +#endif +} + +uint32_t sys_clock_cycle_get_32(void) +{ + return xlnx_tmrctr_read_hw_cycle_count(); +} + +static int xlnx_tmrctr_initialize(void) +{ + if (xlnx_tmrctr.state != XLNX_TMRCTR_INIT) { + return -EEXIST; + } + + xlnx_tmrctr.state = XLNX_TMRCTR_READY; + + for (uint8_t counter_number = 0; counter_number < NUM_COUNTERS; counter_number++) { + /* Set the load register to 0. */ + xlnx_tmrctr_write32(counter_number, 0, XTC_TLR_OFFSET); + /* Reset the timer and the interrupt. */ + xlnx_tmrctr_write32(counter_number, XTC_CSR_INT_OCCURRED_MASK | XTC_CSR_LOAD_MASK, + XTC_TCSR_OFFSET); + /* Release the reset. */ + xlnx_tmrctr_write32(counter_number, 0, XTC_TCSR_OFFSET); + } + + return 0; +} + +static int xlnx_tmrctr_start(void) +{ + if (xlnx_tmrctr.state == XLNX_TMRCTR_INIT) { + return -ENODEV; + } + if (xlnx_tmrctr.state == XLNX_TMRCTR_RUNNING) { + return -EALREADY; + } + + int control_status_register = xlnx_tmrctr_read32( + TICK_TIMER_COUNTER_NUMBER, XTC_TCSR_OFFSET); + xlnx_tmrctr_write32(TICK_TIMER_COUNTER_NUMBER, XTC_CSR_LOAD_MASK, XTC_TCSR_OFFSET); + xlnx_tmrctr_write32(TICK_TIMER_COUNTER_NUMBER, + control_status_register | XTC_CSR_ENABLE_TMR_MASK, XTC_TCSR_OFFSET); + + control_status_register = xlnx_tmrctr_read32(SYS_CLOCK_COUNTER_NUMBER, XTC_TCSR_OFFSET); + xlnx_tmrctr_write32(SYS_CLOCK_COUNTER_NUMBER, XTC_CSR_LOAD_MASK, XTC_TCSR_OFFSET); + xlnx_tmrctr_write32(SYS_CLOCK_COUNTER_NUMBER, + control_status_register | XTC_CSR_ENABLE_TMR_MASK, XTC_TCSR_OFFSET); + + xlnx_tmrctr.state = XLNX_TMRCTR_RUNNING; + + return 0; +} + +static int sys_clock_driver_init(void) +{ + int status = xlnx_tmrctr_initialize(); + + if (status != 0) { + return status; + } + +#ifdef CONFIG_TICKLESS_KERNEL + xlnx_tmrctr_set_reset_value(TICK_TIMER_COUNTER_NUMBER, CYCLES_NEXT_MAX); + xlnx_tmrctr_set_options(TICK_TIMER_COUNTER_NUMBER, XTC_CSR_ENABLE_INT_MASK | + XTC_CSR_DOWN_COUNT_MASK); +#else + xlnx_tmrctr_set_reset_value(TICK_TIMER_COUNTER_NUMBER, TIMER_CYCLES_PER_TICK); + xlnx_tmrctr_set_options(TICK_TIMER_COUNTER_NUMBER, XTC_CSR_ENABLE_INT_MASK | + XTC_CSR_AUTO_RELOAD_MASK | + XTC_CSR_DOWN_COUNT_MASK); +#endif + + xlnx_tmrctr_set_options(SYS_CLOCK_COUNTER_NUMBER, XTC_CSR_AUTO_RELOAD_MASK); + + status = xlnx_tmrctr_start(); + + if (status != 0) { + return status; + } + + last_cycles = xlnx_tmrctr_read_count(); + + IRQ_CONNECT(IRQ_TIMER, 0, xlnx_tmrctr_irq_handler, NULL, 0); + irq_enable(IRQ_TIMER); + + return 0; +} + +#if defined(CONFIG_MICROBLAZE) +/** + * @brief Override the cycle-based busy wait + * Implementation is derived from z_impl_k_busy_wait@kernel/timeout.c + * + * @param usec_to_wait + * @note The MicroBlaze arch already implements an inaccurate, nop-based + * no-timer-required busy wait.
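+ * (Editorial illustration, not part of the original patch: with the 50 MHz
+ * AXI timer clock used by the microblaze_demo SoC in this series, a 10 us
+ * wait converts to (10 * 50000000) / 1000000 = 500 timer cycles; the
+ * 64-bit intermediate keeps that multiplication from overflowing for
+ * long waits.)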
This routine simply overrides it with + * a much more accurate version. + */ +void arch_busy_wait(uint32_t usec_to_wait) +{ + uint32_t start_cycles = xlnx_tmrctr_read_count(); + + /* use 64-bit math to prevent overflow when multiplying */ + uint32_t cycles_to_wait = + (uint32_t)((uint64_t)usec_to_wait * (uint64_t)TIMER_CYCLES_PER_SEC / + (uint64_t)USEC_PER_SEC); + + for (;;) { + uint32_t current_cycles = xlnx_tmrctr_read_count(); + + /* this handles the rollover on an unsigned 32-bit value */ + if ((current_cycles - start_cycles) >= cycles_to_wait) { + break; + } + } +} +#endif + +SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); diff --git a/dts/bindings/cpu/xlnx,microblaze.yaml b/dts/bindings/cpu/xlnx,microblaze.yaml new file mode 100644 index 000000000000..3b8a94cd326f --- /dev/null +++ b/dts/bindings/cpu/xlnx,microblaze.yaml @@ -0,0 +1,32 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +description: AMD Xilinx MicroBlaze + +compatible: "xlnx,microblaze" + +include: cpu.yaml + +properties: + i-cache-base: + type: int + description: i-cache base + ICACHE_BASEADDR parameter defined in xparameters. + d-cache-base: + type: int + description: d-cache base + DCACHE_BASEADDR parameter defined in xparameters. + i-cache-size: + type: int + description: i-cache size in bytes + ICACHE_BYTE_SIZE parameter defined in xparameters. + d-cache-size: + type: int + description: d-cache size in bytes + DCACHE_BYTE_SIZE parameter defined in xparameters. + d-cache-use-writeback: + type: int + description: If CPU supports flushing without invalidating + C_DCACHE_USE_WRITEBACK parameter defined in xparameters. diff --git a/dts/bindings/interrupt-controller/xlnx,intc.yaml b/dts/bindings/interrupt-controller/xlnx,intc.yaml new file mode 100644 index 000000000000..e3f3c36572e7 --- /dev/null +++ b/dts/bindings/interrupt-controller/xlnx,intc.yaml @@ -0,0 +1,21 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +description: Xilinx AXI Interrupt Controller + +compatible: "xlnx,intc" + +include: [interrupt-controller.yaml, base.yaml] + +properties: + reg: + required: true + + "#interrupt-cells": + const: 2 + +interrupt-cells: + - irq + - priority diff --git a/dts/bindings/timer/xlnx,tmrctr.yaml b/dts/bindings/timer/xlnx,tmrctr.yaml new file mode 100644 index 000000000000..42f3896fece2 --- /dev/null +++ b/dts/bindings/timer/xlnx,tmrctr.yaml @@ -0,0 +1,18 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + +description: Xilinx AXI TMRCTR timer counter + +compatible: "xlnx,tmrctr" + +include: base.yaml + +properties: + reg: + required: true + + clock-frequency: + type: int + required: true + description: Clock frequency information for Timer operation diff --git a/dts/microblaze/xlnx/microblaze_demo.dtsi b/dts/microblaze/xlnx/microblaze_demo.dtsi new file mode 100644 index 000000000000..bfa0f1692669 --- /dev/null +++ b/dts/microblaze/xlnx/microblaze_demo.dtsi @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include "skeleton.dtsi" +#include "mem.h" +#include "xparam_helper.h" + +/ { + + microblazeclk: microblaze-clock { + compatible = "fixed-clock"; + clock-frequency = <200000000>; + #clock-cells = <0>; + }; + + peripheralclk: peripheral-clock { + compatible = "fixed-clock"; + clock-frequency = <50000000>; + #clock-cells = <0>; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu: cpu@0 { + device_type = "cpu"; + compatible = "xlnx,microblaze"; + reg = <0>; + clock-frequency = <200000000>; + + i-cache-base = <0x00000000>; + i-cache-size = <0x800>; + i-cache-line-size = <16>; + + d-cache-base = <0x00000000>; + d-cache-size = <0x2000>; + d-cache-line-size = <16>; + d-cache-use-writeback = <0>; + }; + }; + + ddr0: memory@0 { + compatible = "mmio-sram"; + reg = <0x00000000 DT_SIZE_M(256)>; + }; + + soc { + interrupt-parent = <&intc0>; + ranges; + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + + intc0: interrupt-controller@fe010000 { + compatible = "xlnx,intc"; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0xfe010000 DT_SIZE_K(128)>; + }; + + tmrctr0: timer@fe00c000 { + compatible = "xlnx,tmrctr"; + reg = <0xfe00c000 DT_SIZE_K(4)>; + interrupts = <15 15>; + clock-frequency = <50000000>; + }; + + uart0: uart@fe020000 { + compatible = "xlnx,xps-uartlite-1.00.a"; + reg = <0xfe020000 DT_SIZE_K(64)>; + interrupts = <14 14>; + clock-frequency = <50000000>; + parity = "none"; + status = "disabled"; + }; + + }; +}; diff --git a/dts/microblaze/xlnx/xparam_helper.h b/dts/microblaze/xlnx/xparam_helper.h new file mode 100644 index 000000000000..9fa8e33148ff --- /dev/null +++ b/dts/microblaze/xlnx/xparam_helper.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#ifndef ZEPHYR_ARCH_MICROBLAZE_INCLUDE_XPARAM_HELPER_H_ +#define ZEPHYR_ARCH_MICROBLAZE_INCLUDE_XPARAM_HELPER_H_ + +#define GET_BASEADDR(dev) (dev##_BASEADDR) +#define GET_HIGHADDR(dev) (dev##_HIGHADDR) +#define GET_SIZE(dev) (GET_HIGHADDR(dev) - GET_BASEADDR(dev) + 1) +#define GET_RANGE(dev) GET_BASEADDR(dev) GET_SIZE(dev) + +#endif /* ZEPHYR_ARCH_MICROBLAZE_INCLUDE_XPARAM_HELPER_H_ */ diff --git a/include/zephyr/arch/arch_inlines.h b/include/zephyr/arch/arch_inlines.h index 0f32159e2f1b..54aeb10896fb 100644 --- a/include/zephyr/arch/arch_inlines.h +++ b/include/zephyr/arch/arch_inlines.h @@ -32,6 +32,8 @@ #include #elif defined(CONFIG_SPARC) #include +#elif defined(CONFIG_MICROBLAZE) +#include #endif #endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */ diff --git a/include/zephyr/arch/cpu.h b/include/zephyr/arch/cpu.h index 1e107512fa2a..e33a95b1af10 100644 --- a/include/zephyr/arch/cpu.h +++ b/include/zephyr/arch/cpu.h @@ -27,6 +27,8 @@ #include #elif defined(CONFIG_MIPS) #include +#elif defined(CONFIG_MICROBLAZE) +#include #elif defined(CONFIG_ARCH_POSIX) #include #elif defined(CONFIG_SPARC) diff --git a/include/zephyr/arch/microblaze/arch.h b/include/zephyr/arch/microblaze/arch.h new file mode 100644 index 000000000000..994f079f52ee --- /dev/null +++ b/include/zephyr/arch/microblaze/arch.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + + +#ifndef ZEPHYR_INCLUDE_ARCH_MICROBLAZE_ARCH_H_ +#define ZEPHYR_INCLUDE_ARCH_MICROBLAZE_ARCH_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define ARCH_STACK_PTR_ALIGN 16 + +#ifndef _ASMLANGUAGE +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define STACK_ROUND_UP(x) ROUND_UP(x, ARCH_STACK_PTR_ALIGN) + +uint32_t arch_irq_pending(void); +void arch_irq_enable(unsigned int irq); +void arch_irq_disable(unsigned int irq); +int arch_irq_is_enabled(unsigned int irq); +uint32_t arch_irq_set_emulated_pending(uint32_t irq); +uint32_t arch_irq_pending_vector(uint32_t irq_pending); +void z_irq_spurious(const void *unused); + +/** + * Normally used to configure a static interrupt. + * Barebones microblaze has 1 interrupt to offer so we connect + * whatever isr & param supplied to that. SoCs should use this + * macro to connect a single device (can be the AXI interrupt controller) + * to the microblaze's only ISR to eventually make it call XIntc_DeviceInterruptHandler. + * + * @param irq_p IRQ line number + * @param priority_p Interrupt priority + * @param isr_p Interrupt service routine + * @param isr_param_p ISR parameter + * @param flags_p IRQ options + * + * @return The vector assigned to this interrupt + */ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ + { \ + Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \ + } + +static ALWAYS_INLINE unsigned int arch_irq_lock(void) +{ + const uint32_t unshifted_msr_ie_status = mfmsr() & MSR_IE_MASK; + + if (unshifted_msr_ie_status) { + extern void microblaze_disable_interrupts(void); + microblaze_disable_interrupts(); + return 1; + } + return 0; +} + +static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) +{ + if (key) { + extern void microblaze_enable_interrupts(void); + microblaze_enable_interrupts(); + } +} + +static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) +{ + return key != 0; +} + +static ALWAYS_INLINE void arch_nop(void) +{ + __asm__ volatile("nop"); +} + +extern uint32_t sys_clock_cycle_get_32(void); + +static inline uint32_t arch_k_cycle_get_32(void) +{ + return sys_clock_cycle_get_32(); +} + +extern uint64_t sys_clock_cycle_get_64(void); + +static inline uint64_t arch_k_cycle_get_64(void) +{ + return sys_clock_cycle_get_64(); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_MICROBLAZE_ARCH_H_ */ diff --git a/include/zephyr/arch/microblaze/arch_inlines.h b/include/zephyr/arch/microblaze/arch_inlines.h new file mode 100644 index 000000000000..269017974c63 --- /dev/null +++ b/include/zephyr/arch/microblaze/arch_inlines.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#ifndef ZEPHYR_INCLUDE_ARCH_MICROBLAZE_ARCH_INLINES_H +#define ZEPHYR_INCLUDE_ARCH_MICROBLAZE_ARCH_INLINES_H + +#include +#include + +#define _CPU DT_PATH(cpus, cpu_0) + +static ALWAYS_INLINE unsigned int arch_num_cpus(void) +{ + return CONFIG_MP_MAX_NUM_CPUS; +} + +static inline uint32_t arch_get_cpu_clock_frequency(void) +{ + return DT_PROP_OR(_CPU, clock_frequency, 0); +} + +#endif /* ZEPHYR_INCLUDE_ARCH_MICROBLAZE_ARCH_INLINES_H */ diff --git a/include/zephyr/arch/microblaze/exception.h b/include/zephyr/arch/microblaze/exception.h new file mode 100644 index 000000000000..d625d42f40e3 --- /dev/null +++ b/include/zephyr/arch/microblaze/exception.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#ifndef ZEPHYR_INCLUDE_ARCH_MICROBLAZE_EXP_H_ +#define ZEPHYR_INCLUDE_ARCH_MICROBLAZE_EXP_H_ + +#ifndef _ASMLANGUAGE +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct arch_esf { + + uint32_t r31; /* Must be saved across function calls. Callee-save. */ + uint32_t r30; /* Must be saved across function calls. Callee-save. */ + uint32_t r29; /* Must be saved across function calls. Callee-save. */ + uint32_t r28; /* Must be saved across function calls. Callee-save. */ + uint32_t r27; /* Must be saved across function calls. Callee-save. */ + uint32_t r26; /* Must be saved across function calls. Callee-save. */ + uint32_t r25; /* Must be saved across function calls. Callee-save. */ + uint32_t r24; /* Must be saved across function calls. Callee-save. */ + uint32_t r23; /* Must be saved across function calls. Callee-save. */ + uint32_t r22; /* Must be saved across function calls. Callee-save. */ + uint32_t r21; /* Must be saved across function calls. Callee-save. */ + /* r20: Reserved for storing a pointer to the global offset table (GOT) + * in position independent code (PIC). Non-volatile in non-PIC code + */ + uint32_t r20; /* Must be saved across function calls. Callee-save.*/ + uint32_t r19; /* must be saved across function-calls. Callee-save */ + + /* general purpose registers */ + uint32_t r18; /* reserved for assembler/compiler temporaries */ + uint32_t r17; /* return address for exceptions. 
HW if configured else SW */ + uint32_t r16; /* return address for breaks */ + uint32_t r15; /* return address for user vectors */ + uint32_t r14; /* return address for interrupts */ + uint32_t r13; /* read/write small data anchor */ + uint32_t r12; /* temporaries */ + uint32_t r11; /* temporaries */ + uint32_t r10; /* passing parameters / temporaries */ + uint32_t r9; /* passing parameters / temporaries */ + uint32_t r8; /* passing parameters / temporaries */ + uint32_t r7; /* passing parameters / temporaries */ + uint32_t r6; /* passing parameters / temporaries */ + uint32_t r5; /* passing parameters / temporaries */ + uint32_t r4; /* return values / temporaries */ + uint32_t r3; /* return values / temporaries */ + uint32_t r2; /* Read-only small data area anchor */ + uint32_t r1; /* Cstack pointer */ + uint32_t msr; +#if defined(CONFIG_MICROBLAZE_USE_HARDWARE_FLOAT_INSTR) + uint32_t fsr; +#endif +}; + +typedef struct __microblaze_register_dump { + + _callee_saved_t callee_saved; + struct arch_esf esf; + + /* Other SFRs */ + uint32_t pc; + uint32_t esr; + uint32_t ear; + uint32_t edr; + +#if defined(CONFIG_EXTRA_EXCEPTION_INFO) + /* A human readable description of the exception cause. The strings used + * are the same as the #define constant names found in the + * microblaze_exceptions_i.h header file + */ + char *exception_cause_str; +#endif /* defined(CONFIG_EXTRA_EXCEPTION_INFO) */ + +} microblaze_register_dump_t; + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_MICROBLAZE_EXP_H_ */ diff --git a/include/zephyr/arch/microblaze/linker.ld b/include/zephyr/arch/microblaze/linker.ld new file mode 100644 index 000000000000..73987a7216d4 --- /dev/null +++ b/include/zephyr/arch/microblaze/linker.ld @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include +#include +#include +#include + +/* These sections are specific to this CPU */ +#define _EXCEPTION_SECTION_NAME exceptions +#define _RESET_SECTION_NAME reset + +#define ROMABLE_REGION app_ram +#define RAMABLE_REGION app_ram + +ENTRY(CONFIG_KERNEL_ENTRY) + +SECTIONS +{ + +#include + +#ifdef CONFIG_LLEXT +#include +#endif + + GROUP_START(ROMABLE_REGION) + + SECTION_PROLOGUE(.text,,) + { + /* microblaze vectors from 0x0 to 0x50 */ + __mb_vectors = .; + KEEP (*(.vectors.reset)) + . = __mb_vectors + 0x8; + KEEP (*(.vectors.sw_exception)) + . = __mb_vectors + 0x10; + KEEP (*(.vectors.interrupt)) + . = __mb_vectors + 0x20; + KEEP (*(.vectors.hw_exception)) + + /* code */ + . = __mb_vectors + 0x50; + + __text_region_start = .; + + *(.text) + *(.text.*) + *(.gnu.linkonce.t.*) + } GROUP_LINK_IN(RAMABLE_REGION) + + __text_region_end = .; + __text_region_size = __text_region_end - __text_region_start; + + SECTION_DATA_PROLOGUE(_EXCEPTION_SECTION_NAME,,) + { + KEEP(*(".exception.entry.*")) + *(".exception.other.*") + + } GROUP_LINK_IN(ROMABLE_REGION) + + SECTION_PROLOGUE(.note.gnu.build-id,,) + { + KEEP (*(.note.gnu.build-id)) + } GROUP_LINK_IN(ROMABLE_REGION) + + SECTION_PROLOGUE(.init,,) + { + KEEP (*(.init)) + } GROUP_LINK_IN(ROMABLE_REGION) + + SECTION_PROLOGUE(.fini,,) + { + KEEP (*(.fini)) + } GROUP_LINK_IN(ROMABLE_REGION) + + __rodata_region_start = .; + +#include +/* Located in generated directory. This file is populated by calling + * zephyr_linker_sources(ROM_SECTIONS ...). Useful for grouping iterable RO structs. + */ +#include +#include + + SECTION_PROLOGUE(.rodata,,) + { + . 
= ALIGN(4); + __rodata_start = .; + *(.rodata) + *(.rodata.*) + *(.gnu.linkonce.r.*) + +/* Located in generated directory. This file is populated by the + * zephyr_linker_sources() Cmake function. + */ +#include + + } GROUP_LINK_IN(ROMABLE_REGION) + +#include + + __rodata_end = .; + __rodata_region_end = .; + __rodata_region_size = __rodata_region_end - __rodata_region_start; + + GROUP_END(ROMABLE_REGION) + + GROUP_START(RAMABLE_REGION) + + _image_ram_start = .; + +#ifdef CONFIG_CPP + /* These exist to keep Vitis Microblaze toolchain happy */ + SECTION_PROLOGUE(.ctors,,) + { + __CTOR_LIST__ = .; + ___CTORS_LIST___ = .; + KEEP (*crtbegin.o(.ctors)) + KEEP (*(EXCLUDE_FILE(*crtend.o) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + __CTOR_END__ = .; + ___CTORS_END___ = .; + } GROUP_LINK_IN(RAMABLE_REGION) + SECTION_PROLOGUE(.dtors,,) + { + __DTOR_LIST__ = .; + ___DTORS_LIST___ = .; + KEEP (*crtbegin.o(.dtors)) + KEEP (*(EXCLUDE_FILE(*crtend.o) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + PROVIDE(__DTOR_END__ = .); + PROVIDE(___DTORS_END___ = .); + } GROUP_LINK_IN(RAMABLE_REGION) +#endif + + SECTION_DATA_PROLOGUE(.sdata2,,) + { + . = ALIGN(4); + __sdata2_start = .; + *( .sdata2) + *(.sdata2.*) + *(.gnu.linkonce.s2.*) + . = ALIGN(4); + __sdata2_end = .; + } GROUP_LINK_IN(RAMABLE_REGION) + +#include +#include +#include + + SECTION_DATA_PROLOGUE(.data,,) + { + . = ALIGN(4); + __data_start = .; + *(.data) + *(.data.*) + +#include + + *(.gnu.linkonce.d.*) + . = ALIGN(4); + +#include + } GROUP_LINK_IN(RAMABLE_REGION) + +/* Located in generated directory. This file is populated by the + * zephyr_linker_sources() Cmake function. + */ +#include + + __data_end = .; + __data_size = __data_end - __data_start; + + SECTION_PROLOGUE(.got,,) + { + . = ALIGN(4); + *(.got) + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_PROLOGUE(.got1,,) + { + . = ALIGN(4); + *(.got1) + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_PROLOGUE(.got2,,) + { + . = ALIGN(4); + *(.got2) + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_PROLOGUE(.jcr,,) + { + . = ALIGN(4); + *(.jcr) + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_DATA_PROLOGUE(.sdata,,) + { + . = ALIGN(4); + __sdata_start = .; + *(.sdata) + *(.sdata.*) + *(.gnu.linkonce.s.*) + + __sdata_end = .; + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_PROLOGUE(.sbss (NOLOAD),,) + { + . = ALIGN(4); + __sbss_start = .; + *(.sbss) + *(.sbss.*) + *(.gnu.linkonce.sb.*) + . = ALIGN(4); + __sbss_end = .; + } GROUP_LINK_IN(RAMABLE_REGION) + + _gp = (__sbss_end + __sdata_start) / 2; + PROVIDE(gp = _gp); + + SECTION_DATA_PROLOGUE(.tdata,,) + { + . = ALIGN(4); + __tdata_start = .; + *(.tdata) + *(.tdata.*) + *(.gnu.linkonce.td.*) + __tdata_end = .; + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_PROLOGUE(.tbss,,) + { + . = ALIGN(4); + __tbss_start = .; + *(.tbss) + *(.tbss.*) + *(.gnu.linkonce.tb.*) + __tbss_end = .; + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_PROLOGUE(.bss (NOLOAD),,) + { + . = ALIGN(4); + __bss_start = .; + *(.bss) + *(.bss.*) + *(.gnu.linkonce.b.*) + *(COMMON) + . = ALIGN(4); + __bss_end = .; + } GROUP_LINK_IN(RAMABLE_REGION) + + + _SDA_BASE_ = __sdata_start + ((__sbss_end - __sdata_start) / 2 ); + + _SDA2_BASE_ = __sdata2_start + ((__sdata2_end - __sdata2_start) / 2 ); + +#include + +#ifdef CONFIG_GEN_ISR_TABLES +#include +#endif + +#include + + /* Vitis Microblaze toolchain may moan without these sections, + * but we won't give them any space. Zephyr creates its malloc arena + * after _end. 
These are in case someone tries to use Vitis GCC with + * its _HEAP_SIZE and _STACK_SIZE definitions. e.g. during migration + * I'm making the linker moan so that you now know better than to rely + * on those values in Zephyr world. + */ + _HEAP_SIZE = DEFINED(_HEAP_SIZE) ? -_HEAP_SIZE : 0; + _STACK_SIZE = DEFINED(_STACK_SIZE) ? -_STACK_SIZE : 0; + SECTION_PROLOGUE(.heap (NOLOAD),,) + { + . += _HEAP_SIZE; + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_PROLOGUE(.stack (NOLOAD),,) + { + . += _STACK_SIZE; + } GROUP_LINK_IN(RAMABLE_REGION) + +#include + + GROUP_END(RAMABLE_REGION) + +#include + +} diff --git a/include/zephyr/arch/microblaze/sys_bitops.h b/include/zephyr/arch/microblaze/sys_bitops.h new file mode 100644 index 000000000000..538229654f80 --- /dev/null +++ b/include/zephyr/arch/microblaze/sys_bitops.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +/* Memory bits manipulation functions in non-arch-specific C code */ + +#ifndef ZEPHYR_INCLUDE_ARCH_MICROBLAZE_SYS_BITOPS_H_ +#define ZEPHYR_INCLUDE_ARCH_MICROBLAZE_SYS_BITOPS_H_ + +#ifndef _ASMLANGUAGE + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +static ALWAYS_INLINE void sys_set_bit(mem_addr_t addr, unsigned int bit) +{ + compiler_barrier(); + uint32_t temp = *(volatile uint32_t *)addr; + *(volatile uint32_t *)addr = temp | (1 << bit); + compiler_barrier(); +} + +static ALWAYS_INLINE void sys_clear_bit(mem_addr_t addr, unsigned int bit) +{ + compiler_barrier(); + uint32_t temp = *(volatile uint32_t *)addr; + *(volatile uint32_t *)addr = temp & ~(1 << bit); + compiler_barrier(); +} + +static ALWAYS_INLINE int sys_test_bit(mem_addr_t addr, unsigned int bit) +{ + uint32_t temp; + + compiler_barrier(); + temp = *(volatile uint32_t *)addr; + compiler_barrier(); + + return temp & (1 << bit); +} + +static ALWAYS_INLINE void sys_set_bits(mem_addr_t addr, unsigned int mask) +{ + compiler_barrier(); + uint32_t temp = *(volatile uint32_t *)addr; + *(volatile uint32_t *)addr = temp | mask; + compiler_barrier(); +} + +static ALWAYS_INLINE void sys_clear_bits(mem_addr_t addr, unsigned int mask) +{ + compiler_barrier(); + uint32_t temp = *(volatile uint32_t *)addr; + *(volatile uint32_t *)addr = temp & ~mask; + compiler_barrier(); +} + +static ALWAYS_INLINE void sys_bitfield_set_bit(mem_addr_t addr, unsigned int bit) +{ + /* Doing memory offsets in terms of 32-bit values to prevent + * alignment issues + */ + sys_set_bit(addr + ((bit >> 5) << 2), bit & 0x1F); +} + +static ALWAYS_INLINE void sys_bitfield_clear_bit(mem_addr_t addr, unsigned int bit) +{ + sys_clear_bit(addr + ((bit >> 5) << 2), bit & 0x1F); +} + +static ALWAYS_INLINE int sys_bitfield_test_bit(mem_addr_t addr, unsigned int bit) +{ + return sys_test_bit(addr + ((bit >> 5) << 2), bit & 0x1F); +} + +static ALWAYS_INLINE int sys_test_and_set_bit(mem_addr_t addr, unsigned int bit) +{ + int ret; + + ret = sys_test_bit(addr, bit); + sys_set_bit(addr, bit); + + return ret; +} + +static ALWAYS_INLINE int sys_test_and_clear_bit(mem_addr_t addr, unsigned int bit) +{ + int ret; + + ret = sys_test_bit(addr, bit); + sys_clear_bit(addr, bit); + + return ret; +} + +static ALWAYS_INLINE int sys_bitfield_test_and_set_bit(mem_addr_t addr, unsigned int bit) +{ + int ret; + + ret = sys_bitfield_test_bit(addr, bit); + sys_bitfield_set_bit(addr, bit); + + return ret; +} + +static ALWAYS_INLINE int sys_bitfield_test_and_clear_bit(mem_addr_t addr, unsigned int 
bit) +{ + int ret; + + ret = sys_bitfield_test_bit(addr, bit); + sys_bitfield_clear_bit(addr, bit); + + return ret; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_MICROBLAZE_SYS_BITOPS_H_ */ diff --git a/include/zephyr/arch/microblaze/sys_io.h b/include/zephyr/arch/microblaze/sys_io.h new file mode 100644 index 000000000000..83acca074328 --- /dev/null +++ b/include/zephyr/arch/microblaze/sys_io.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + * Copyright (c) 2015, Wind River Systems, Inc. + * Copyright (c) 2017, Oticon A/S + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* Memory mapped registers I/O functions in non-arch-specific C code */ + +#ifndef ZEPHYR_INCLUDE_ARCH_MICROBLAZE_SYS_IO_H_ +#define ZEPHYR_INCLUDE_ARCH_MICROBLAZE_SYS_IO_H_ + +#ifndef _ASMLANGUAGE + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +static ALWAYS_INLINE uint8_t sys_read8(mem_addr_t addr) +{ + uint8_t value; + + compiler_barrier(); + value = *(volatile uint8_t *)addr; + compiler_barrier(); + + return value; +} + +static ALWAYS_INLINE void sys_write8(uint8_t data, mem_addr_t addr) +{ + compiler_barrier(); + *(volatile uint8_t *)addr = data; + compiler_barrier(); +} + +static ALWAYS_INLINE uint16_t sys_read16(mem_addr_t addr) +{ + uint16_t value; + + compiler_barrier(); + value = *(volatile uint16_t *)addr; + compiler_barrier(); + + return value; +} + +static ALWAYS_INLINE void sys_write16(uint16_t data, mem_addr_t addr) +{ + compiler_barrier(); + *(volatile uint16_t *)addr = data; + compiler_barrier(); +} + +static ALWAYS_INLINE uint32_t sys_read32(mem_addr_t addr) +{ + uint32_t value; + + compiler_barrier(); + value = *(volatile uint32_t *)addr; + compiler_barrier(); + + return value; +} + +static ALWAYS_INLINE void sys_write32(uint32_t data, mem_addr_t addr) +{ + compiler_barrier(); + *(volatile uint32_t *)addr = data; + compiler_barrier(); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_MICROBLAZE_SYS_IO_H_ */ diff --git a/include/zephyr/arch/microblaze/thread.h b/include/zephyr/arch/microblaze/thread.h new file mode 100644 index 000000000000..c1664938621d --- /dev/null +++ b/include/zephyr/arch/microblaze/thread.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +/** + * @file + * @brief Per-arch thread definition + * + * This file contains definitions for + * + * struct _thread_arch + * struct _callee_saved + * + * necessary to instantiate instances of struct k_thread. + */ + +#ifndef ZEPHYR_INCLUDE_ARCH_MICROBLAZE_THREAD_H_ +#define ZEPHYR_INCLUDE_ARCH_MICROBLAZE_THREAD_H_ + +#ifndef _ASMLANGUAGE +#include + +/* + * The following structure defines the list of registers that need to be + * saved/restored when a cooperative context switch occurs. 
+ */ +struct _callee_saved { + /* r1 is thread's stack pointer */ + uint32_t r1; + /* IRQ status before irq_lock() and call to z_swap() */ + uint32_t key; + /* Return value of z_swap() */ + uint32_t retval; + /* 1 if the thread cooperatively yielded */ + uint32_t preempted; +}; +typedef struct _callee_saved _callee_saved_t; + +struct _thread_arch { +}; + +typedef struct _thread_arch _thread_arch_t; + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_MICROBLAZE_THREAD_H_ */ diff --git a/include/zephyr/linker/linker-tool-gcc.h b/include/zephyr/linker/linker-tool-gcc.h index 32319a41543a..e918071c9ef3 100644 --- a/include/zephyr/linker/linker-tool-gcc.h +++ b/include/zephyr/linker/linker-tool-gcc.h @@ -55,6 +55,8 @@ /* Not needed */ #elif defined(CONFIG_MIPS) OUTPUT_ARCH("mips") +#elif defined(CONFIG_MICROBLAZE) + OUTPUT_FORMAT("elf32-microblaze") #elif defined(CONFIG_ARCH_POSIX) /* Not needed */ #elif defined(CONFIG_SPARC) diff --git a/include/zephyr/linker/utils.h b/include/zephyr/linker/utils.h index c6f9177c42bf..9dc9a518369a 100644 --- a/include/zephyr/linker/utils.h +++ b/include/zephyr/linker/utils.h @@ -36,7 +36,8 @@ static inline bool linker_is_in_rodata(const void *addr) #if defined(CONFIG_ARM) || defined(CONFIG_ARC) || defined(CONFIG_X86) || \ defined(CONFIG_ARM64) || defined(CONFIG_NIOS2) || \ defined(CONFIG_RISCV) || defined(CONFIG_SPARC) || \ - defined(CONFIG_MIPS) || defined(CONFIG_XTENSA) + defined(CONFIG_MIPS) || defined(CONFIG_XTENSA) || \ + defined(CONFIG_MICROBLAZE) extern char __rodata_region_start[]; extern char __rodata_region_end[]; #define RO_START __rodata_region_start diff --git a/include/zephyr/toolchain/common.h b/include/zephyr/toolchain/common.h index c84328365fd3..b9f24c991a4d 100644 --- a/include/zephyr/toolchain/common.h +++ b/include/zephyr/toolchain/common.h @@ -87,7 +87,8 @@ #define PERFOPT_ALIGN .align 4 #elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) || \ - defined(CONFIG_XTENSA) || defined(CONFIG_MIPS) + defined(CONFIG_XTENSA) || defined(CONFIG_MIPS) || \ + defined(CONFIG_MICROBLAZE) #define PERFOPT_ALIGN .balign 4 #elif defined(CONFIG_ARCH_POSIX) diff --git a/include/zephyr/toolchain/gcc.h b/include/zephyr/toolchain/gcc.h index b592eb021e20..2e893d5906e4 100644 --- a/include/zephyr/toolchain/gcc.h +++ b/include/zephyr/toolchain/gcc.h @@ -358,7 +358,7 @@ do { \ #if defined(CONFIG_ARM) || defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) \ || defined(CONFIG_XTENSA) || defined(CONFIG_ARM64) \ - || defined(CONFIG_MIPS) + || defined(CONFIG_MIPS) || defined(CONFIG_MICROBLAZE) #define GTEXT(sym) .global sym; .type sym, %function #define GDATA(sym) .global sym; .type sym, %object #define WTEXT(sym) .weak sym; .type sym, %function @@ -540,7 +540,8 @@ do { \ "\n\t.type\t" #name ",@object") #elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) || \ - defined(CONFIG_XTENSA) || defined(CONFIG_MIPS) + defined(CONFIG_XTENSA) || defined(CONFIG_MIPS) || \ + defined(CONFIG_MICROBLAZE) /* No special prefixes necessary for constants in this arch AFAICT */ #define GEN_ABSOLUTE_SYM(name, value) \ diff --git a/scripts/logging/dictionary/dictionary_parser/log_database.py b/scripts/logging/dictionary/dictionary_parser/log_database.py index 83e34e4abebb..b4f2a34714bd 100644 --- a/scripts/logging/dictionary/dictionary_parser/log_database.py +++ b/scripts/logging/dictionary/dictionary_parser/log_database.py @@ -27,6 +27,9 @@ "arm64" : { "kconfig": "CONFIG_ARM64", }, + "microblaze" : { + "kconfig": "CONFIG_MICROBLAZE", + }, "mips" : { "kconfig": "CONFIG_MIPS", }, diff 
--git a/scripts/schemas/twister/platform-schema.yaml b/scripts/schemas/twister/platform-schema.yaml index c651f83c3e36..626e84beb8c8 100644 --- a/scripts/schemas/twister/platform-schema.yaml +++ b/scripts/schemas/twister/platform-schema.yaml @@ -54,6 +54,7 @@ mapping: "sparc", "x86", "xtensa", + "microblaze", # unit testing "unit", diff --git a/soc/microblaze/microblaze_demo/CMakeLists.txt b/soc/microblaze/microblaze_demo/CMakeLists.txt new file mode 100644 index 000000000000..dfc4e30dd26c --- /dev/null +++ b/soc/microblaze/microblaze_demo/CMakeLists.txt @@ -0,0 +1,11 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + +zephyr_include_directories(include) + +zephyr_sources( + soc.c +) + +set(SOC_LINKER_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/linker.ld CACHE INTERNAL "") diff --git a/soc/microblaze/microblaze_demo/Kconfig b/soc/microblaze/microblaze_demo/Kconfig new file mode 100644 index 000000000000..6acaec96f3e3 --- /dev/null +++ b/soc/microblaze/microblaze_demo/Kconfig @@ -0,0 +1,9 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +config SOC_XLNX_MICROBLAZE_DEMO + select MICROBLAZE + select XLNX_INTC + select XLNX_TMRCTR diff --git a/soc/microblaze/microblaze_demo/Kconfig.defconfig b/soc/microblaze/microblaze_demo/Kconfig.defconfig new file mode 100644 index 000000000000..52c467d04f63 --- /dev/null +++ b/soc/microblaze/microblaze_demo/Kconfig.defconfig @@ -0,0 +1,17 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +if SOC_XLNX_MICROBLAZE_DEMO + +config SYS_CLOCK_HW_CYCLES_PER_SEC + default 200000000 + +config SYS_CLOCK_TICKS_PER_SEC + default 100 + +config NUM_IRQS + default 32 + +endif # SOC_XLNX_MICROBLAZE_DEMO diff --git a/soc/microblaze/microblaze_demo/Kconfig.soc b/soc/microblaze/microblaze_demo/Kconfig.soc new file mode 100644 index 000000000000..24efc69ee3ea --- /dev/null +++ b/soc/microblaze/microblaze_demo/Kconfig.soc @@ -0,0 +1,10 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) +# Copyright (c) 2023 Alp Sayin +# SPDX-License-Identifier: Apache-2.0 + + +config SOC_XLNX_MICROBLAZE_DEMO + bool + +config SOC + default "microblaze_demo" if SOC_XLNX_MICROBLAZE_DEMO diff --git a/soc/microblaze/microblaze_demo/include/layout.h b/soc/microblaze/microblaze_demo/include/layout.h new file mode 100644 index 000000000000..80f78f3e6367 --- /dev/null +++ b/soc/microblaze/microblaze_demo/include/layout.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. 
(AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#ifndef ZEPHYR_SOC_MICROBLAZE_SOC_INCLUDE_LAYOUT_H_ +#define ZEPHYR_SOC_MICROBLAZE_SOC_INCLUDE_LAYOUT_H_ + +#include + +#define _DDR_NODE DT_CHOSEN(zephyr_sram) +#define _LAYOUT_DDR_LOC DT_REG_ADDR(_DDR_NODE) +#define _LAYOUT_DDR_SIZE DT_REG_SIZE(_DDR_NODE) + +#define _RESET_VECTOR (_LAYOUT_DDR_LOC) +#define _USER_VECTOR (_RESET_VECTOR + 0x8) +#define _INTR_VECTOR (_RESET_VECTOR + 0x10) +#define _EXC_VECTOR (_RESET_VECTOR + 0x20) + +#endif /* ZEPHYR_SOC_MICROBLAZE_SOC_INCLUDE_LAYOUT_H_ */ diff --git a/soc/microblaze/microblaze_demo/include/soc.h b/soc/microblaze/microblaze_demo/include/soc.h new file mode 100644 index 000000000000..659227e97e81 --- /dev/null +++ b/soc/microblaze/microblaze_demo/include/soc.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#ifndef __SOC_H_ +#define __SOC_H_ + +#include +#include "layout.h" + +#ifdef CONFIG_XLNX_INTC + +extern void xlnx_intc_irq_enable(uint32_t irq); +extern void xlnx_intc_irq_disable(uint32_t irq); +extern void xlnx_intc_irq_acknowledge(uint32_t mask); +extern uint32_t xlnx_intc_irq_pending(void); +extern uint32_t xlnx_intc_irq_get_enabled(void); +extern uint32_t xlnx_intc_irq_pending_vector(void); + +#endif /* CONFIG_XLNX_INTC */ + +#endif /* __SOC_H_ */ diff --git a/soc/microblaze/microblaze_demo/linker.ld b/soc/microblaze/microblaze_demo/linker.ld new file mode 100644 index 000000000000..62edc10f068a --- /dev/null +++ b/soc/microblaze/microblaze_demo/linker.ld @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include + +_DDR_LOC = _LAYOUT_DDR_LOC; +_DDR_SIZE = _LAYOUT_DDR_SIZE; + +MEMORY +{ + app_ram (RX) : ORIGIN = _DDR_LOC, LENGTH = _DDR_SIZE + + /* Used by and documented in include/linker/intlist.ld */ + IDT_LIST (wx) : ORIGIN = 0xFFFFF7FF, LENGTH = 2K +} + +#include diff --git a/soc/microblaze/microblaze_demo/soc.c b/soc/microblaze/microblaze_demo/soc.c new file mode 100644 index 000000000000..a0fd96aa224a --- /dev/null +++ b/soc/microblaze/microblaze_demo/soc.c @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2023 Advanced Micro Devices, Inc. (AMD) + * Copyright (c) 2023 Alp Sayin + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#include "soc.h" + +#include +#include +#include + +#define LOG_LEVEL CONFIG_SOC_LOG_LEVEL +#include +LOG_MODULE_REGISTER(soc); + +#ifdef CONFIG_XLNX_INTC + +/** + * @brief Override arch_irq_enable with a call to Xintc driver + * + * @param irq irq number to enable + */ +void arch_irq_enable(uint32_t irq) +{ + xlnx_intc_irq_enable(irq); +} + +/** + * @brief Override arch_irq_disable with a call to Xintc driver + * + * @param irq irq number to disable + */ +void arch_irq_disable(uint32_t irq) +{ + xlnx_intc_irq_disable(irq); +} + +/** + * @brief Override arch_irq_is_enabled with a call to Xintc driver + * + * @param irq irq number to see if enabled + */ +int arch_irq_is_enabled(unsigned int irq) +{ + return BIT(irq) & xlnx_intc_irq_get_enabled(); +} + +/** + * @brief Returns the currently pending interrupts. + * + * @return Pending IRQ bitmask. Pending IRQs will have their bitfield set to 1. 0 if no interrupt is + * pending. + */ +uint32_t arch_irq_pending(void) +{ + return xlnx_intc_irq_pending(); +}; + +/** + * @brief Returns the vector for highest pending interrupt. 
+ * + * @return The vector (i.e. index) of the highest-prio/lowest-numbered pending interrupt, to be + * used in a jump table. This is used for sw_isr_table. + */ +uint32_t arch_irq_pending_vector(uint32_t ipending) +{ + ARG_UNUSED(ipending); + return xlnx_intc_irq_pending_vector(); +} + +#endif /* #ifdef CONFIG_XLNX_INTC */ + +/** + * + * @brief Perform basic hardware initialization + * + * @return 0 + */ +static int soc_init(void) +{ + return 0; +} + +SYS_INIT(soc_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); diff --git a/soc/microblaze/microblaze_demo/soc.yml b/soc/microblaze/microblaze_demo/soc.yml new file mode 100644 index 000000000000..ed7a3252ecb8 --- /dev/null +++ b/soc/microblaze/microblaze_demo/soc.yml @@ -0,0 +1,2 @@ +socs: + - name: microblaze_demo diff --git a/subsys/debug/thread_info.c b/subsys/debug/thread_info.c index 742cbd594850..be7efbc58e8b 100644 --- a/subsys/debug/thread_info.c +++ b/subsys/debug/thread_info.c @@ -77,6 +77,9 @@ const size_t _kernel_thread_info_offsets[] = { #elif defined(CONFIG_NIOS2) [THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread, callee_saved.sp), +#elif defined(CONFIG_MICROBLAZE) + [THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread, + callee_saved.r1), #elif defined(CONFIG_RISCV) [THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread, callee_saved.sp), diff --git a/subsys/mgmt/mcumgr/grp/os_mgmt/include/os_mgmt_processor.h b/subsys/mgmt/mcumgr/grp/os_mgmt/include/os_mgmt_processor.h index 71dfda7c1931..02ff5c2295cc 100644 --- a/subsys/mgmt/mcumgr/grp/os_mgmt/include/os_mgmt_processor.h +++ b/subsys/mgmt/mcumgr/grp/os_mgmt/include/os_mgmt_processor.h @@ -154,6 +154,8 @@ extern "C" { #define PROCESSOR_NAME "xtensa" #elif defined(CONFIG_SPARC) #define PROCESSOR_NAME "sparc" +#elif defined(CONFIG_MICROBLAZE) +#define PROCESSOR_NAME "microblaze" #endif #ifndef PROCESSOR_NAME diff --git a/subsys/testsuite/include/zephyr/interrupt_util.h b/subsys/testsuite/include/zephyr/interrupt_util.h index a3653618d970..0e727ca6c5c0 100644 --- a/subsys/testsuite/include/zephyr/interrupt_util.h +++ b/subsys/testsuite/include/zephyr/interrupt_util.h @@ -213,6 +213,12 @@ static inline void trigger_irq(int irq) z_vim_arm_enter_irq(irq); } +#elif defined(CONFIG_MICROBLAZE) +static inline void trigger_irq(int irq) +{ + EMULATE_IRQ(irq); +} + #else /* So far, Nios II does not support this */ #define NO_TRIGGER_FROM_SW diff --git a/subsys/testsuite/include/zephyr/test_asm_inline_gcc.h b/subsys/testsuite/include/zephyr/test_asm_inline_gcc.h index aba64c450c8c..a68ec9dec6ed 100644 --- a/subsys/testsuite/include/zephyr/test_asm_inline_gcc.h +++ b/subsys/testsuite/include/zephyr/test_asm_inline_gcc.h @@ -48,6 +48,8 @@ static inline void timestamp_serialize(void) #define timestamp_serialize() #elif defined(CONFIG_MIPS) #define timestamp_serialize() +#elif defined(CONFIG_MICROBLAZE) +#define timestamp_serialize() #else #error implementation of timestamp_serialize() not provided for your CPU target #endif diff --git a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c index 699c7bdc642b..cf4de0a05f67 100644 --- a/tests/kernel/context/src/main.c +++ b/tests/kernel/context/src/main.c @@ -65,6 +65,8 @@ * unless TICK_IRQ is defined here for them */ #endif /* defined(CONFIG_ARCH_POSIX) */ +#elif defined(CONFIG_MICROBLAZE) && defined(CONFIG_XLNX_TMRCTR) +#define TICK_IRQ DT_IRQN(DT_INST(CONFIG_XLNX_TMRCTR_TIMER_INDEX, xlnx_tmrctr)) +#else extern const int32_t z_sys_timer_irq_for_test; @@ -72,10 +74,11 @@ extern const int32_t 
z_sys_timer_irq_for_test; #endif -/* Cortex-M1 and Nios II do have a power saving instruction, so k_cpu_idle() +/* Cortex-M1, MicroBlaze and Nios II do have a power saving instruction, so k_cpu_idle() * returns immediately */ -#if !defined(CONFIG_CPU_CORTEX_M1) && !defined(CONFIG_NIOS2) +#if !defined(CONFIG_CPU_CORTEX_M1) && !defined(CONFIG_NIOS2) && \ + (!(defined(CONFIG_MICROBLAZE) && defined(CONFIG_MICROBLAZE_IDLE_NOP))) #define HAS_POWERSAVE_INSTRUCTION #endif diff --git a/tests/kernel/fatal/exception/src/main.c b/tests/kernel/fatal/exception/src/main.c index a946d02de7f9..2b1ce885d8d9 100644 --- a/tests/kernel/fatal/exception/src/main.c +++ b/tests/kernel/fatal/exception/src/main.c @@ -91,7 +91,8 @@ void entry_cpu_exception(void *p1, void *p2, void *p3) __asm__ volatile (".word 0x77777777"); #else /* Triggers usage fault on ARM, illegal instruction on - * xtensa, TLB exception (instruction fetch) on MIPS. + * xtensa, TLB exception (instruction fetch) on MIPS, + * illegal op-code instruction on microblaze */ { volatile long illegal = 0; @@ -449,6 +450,11 @@ ZTEST(fatal_exception, test_fatal) static void *fatal_setup(void) { + +#if defined(CONFIG_MICROBLAZE) + microblaze_enable_exceptions(); +#endif + #if defined(CONFIG_DEMAND_PAGING) && \ !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT) uintptr_t pin_addr; diff --git a/tests/kernel/interrupt/src/interrupt_offload.c b/tests/kernel/interrupt/src/interrupt_offload.c index b2ac007c14ff..418d63f9bb78 100644 --- a/tests/kernel/interrupt/src/interrupt_offload.c +++ b/tests/kernel/interrupt/src/interrupt_offload.c @@ -98,6 +98,8 @@ void isr_handler(const void *param) #define TEST_IRQ_DYN_LINE 0 #endif +#elif defined(CONFIG_MICROBLAZE) +#define TEST_IRQ_DYN_LINE 1 #else #define TEST_IRQ_DYN_LINE 0 #endif diff --git a/tests/kernel/mem_protect/stackprot/testcase.yaml b/tests/kernel/mem_protect/stackprot/testcase.yaml index c8955a0cfa15..3ee36e1aa4a0 100644 --- a/tests/kernel/mem_protect/stackprot/testcase.yaml +++ b/tests/kernel/mem_protect/stackprot/testcase.yaml @@ -5,6 +5,7 @@ tests: - xtensa - posix - sparc + - microblaze tags: - kernel - userspace diff --git a/tests/lib/mpsc_pbuf/testcase.yaml b/tests/lib/mpsc_pbuf/testcase.yaml index 50d9722e4264..aeb1ada0c7cd 100644 --- a/tests/lib/mpsc_pbuf/testcase.yaml +++ b/tests/lib/mpsc_pbuf/testcase.yaml @@ -10,6 +10,7 @@ tests: - qemu_cortex_r5 - qemu_leon3 - qemu_nios2 + - qemu_microblaze - qemu_riscv32 - qemu_riscv64 - qemu_x86 diff --git a/tests/lib/sprintf/src/main.c b/tests/lib/sprintf/src/main.c index 8395be690ffb..ec54afb98ddc 100644 --- a/tests/lib/sprintf/src/main.c +++ b/tests/lib/sprintf/src/main.c @@ -902,17 +902,22 @@ ZTEST(sprintf, test_put) ZTEST(sprintf, test_fwrite) { int ret; + FILE *stream; - ret = fwrite("This 3", 0, 0, stdout); + stream = stdout; + ret = fwrite("This 3", 0, 0, stream); zassert_equal(ret, 0, "fwrite failed!"); - ret = fwrite("This 3", 0, 4, stdout); + stream = stdout; + ret = fwrite("This 3", 0, 4, stream); zassert_equal(ret, 0, "fwrite failed!"); - ret = fwrite("This 3", 1, 4, stdout); + stream = stdout; + ret = fwrite("This 3", 1, 4, stream); zassert_equal(ret, 4, "fwrite failed!"); - ret = fwrite("This 3", 1, 4, stdin); + stream = stdin; + ret = fwrite("This 3", 1, 4, stream); zassert_equal(ret, 0, "fwrite failed!"); } diff --git a/tests/subsys/secure_storage/psa/its/testcase.yaml b/tests/subsys/secure_storage/psa/its/testcase.yaml index 807fdf4bf44c..bd5f21f1cc10 100644 --- a/tests/subsys/secure_storage/psa/its/testcase.yaml +++ 
b/tests/subsys/secure_storage/psa/its/testcase.yaml @@ -3,6 +3,8 @@ common: - native_sim platform_exclude: - qemu_cortex_m0 # settings subsystem initialization fails + arch_exclude: + - microblaze # no suitable filter is available here, so exclude the architecture outright timeout: 120 tags: - psa.secure_storage
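Editorial note, not part of the patch series: the sketch below illustrates how the pieces above fit together on the demo SoC. The MicroBlaze core exposes a single interrupt line, ARCH_IRQ_CONNECT() places handlers in the generated SW ISR table, and the soc.c overrides route enable/disable and pending queries to the AXI interrupt controller. The handler name is hypothetical and uart0's interrupt numbers are used only as an example; a real driver would normally bring its own binding and Kconfig.

/* Illustrative only: registering a handler for a peripheral behind the AXI INTC.
 * At run time the core's single interrupt fires, the architecture asks the INTC
 * for the highest-priority pending source via arch_irq_pending_vector(), and the
 * matching SW ISR table entry (installed by IRQ_CONNECT) is invoked.
 */
#include <zephyr/devicetree.h>
#include <zephyr/irq.h>
#include <zephyr/kernel.h>

#define DEMO_UART_NODE DT_NODELABEL(uart0)

static void demo_uart_isr(const void *arg)	/* hypothetical handler */
{
	ARG_UNUSED(arg);
	/* service the device here */
}

static void demo_uart_irq_init(void)
{
	IRQ_CONNECT(DT_IRQN(DEMO_UART_NODE), DT_IRQ(DEMO_UART_NODE, priority),
		    demo_uart_isr, NULL, 0);

	/* irq_enable() reaches xlnx_intc_irq_enable() through the arch_irq_enable()
	 * override in soc.c above.
	 */
	irq_enable(DT_IRQN(DEMO_UART_NODE));
}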