diff --git a/arch/lib/Kconfig b/arch/lib/Kconfig index eef92af0e711..351dd99a65be 100644 --- a/arch/lib/Kconfig +++ b/arch/lib/Kconfig @@ -25,7 +25,7 @@ config MODULES option modules config MMU - def_bool n + def_bool y config FPU def_bool n @@ -61,9 +61,6 @@ config BASE_FULL config SELECT_MEMORY_MODEL def_bool n -config FLAT_NODE_MEM_MAP - def_bool n - config PAGEFLAGS_EXTENDED def_bool n @@ -121,4 +118,44 @@ source "crypto/Kconfig" source "lib/Kconfig" config SLIB - def_bool y \ No newline at end of file + def_bool y + +config HAVE_MEMBLOCK + def_bool y + +config DEBUG_INFO + def_bool y + +config FLAT_NODE_MEM_MAP + def_bool y + +config NO_BOOTMEM + def_bool y + +config HIGHMEM + def_bool y + +config CPU_COPY_V6 + def_bool y + +config CPU_TLB_V7 + def_bool y + +config PAGE_OFFSET + hex + default 0xC0000000 + +config PHYS_OFFSET + hex + default 0x00000000 + +config ARM_L1_CACHE_SHIFT + int + default 6 + +config PGTABLE_LEVELS + int + default 2 + +config HAVE_ARCH_PFN_VALID + def_bool y diff --git a/arch/lib/Makefile b/arch/lib/Makefile index dc1f0addc5d9..0104e32aa32f 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -99,7 +99,8 @@ kernel/time/_to_keep=time.o timekeeping.o jiffies.o clocksource.o ntp.o kernel/rcu_to_keep=rcu/srcu.o rcu/pdate.o rcu/tiny.o kernel/locking_to_keep=locking/mutex.o kernel/bpf_to_keep=bpf/core.o -mm/_to_keep=util.o list_lru.o slib.o +mm/_to_keep=util.o list_lru.o slib.o page_alloc.o memblock.o mmzone.o slib_env.o \ +nobootmem.o highmem.o oom_kill.o crypto/_to_keep=aead.o ahash.o shash.o api.o algapi.o cipher.o compress.o proc.o \ crc32c_generic.o rng.o drivers/base/_to_keep=class.o core.o bus.o dd.o driver.o devres.o module.o map.o syscore.o @@ -127,7 +128,7 @@ quiet_cmd_objsmk = OBJS-MK $@ done > $@ $(ARCH_DIR)/objs.mk: $(ARCH_DIR)/Makefile.print $(srctree)/.config $(ARCH_DIR)/Makefile - +$(call if_changed,objsmk) + +$(call if_changed,objsmk); quiet_cmd_linker = GEN $@ cmd_linker = ld -shared --verbose | ./$^ > $@ diff --git a/arch/lib/fs.c b/arch/lib/fs.c index 33efe5f1da32..7757a1687c86 100644 --- a/arch/lib/fs.c +++ b/arch/lib/fs.c @@ -63,8 +63,3 @@ int dirtytime_interval_handler(struct ctl_table *table, int write, { return -ENOSYS; } - -unsigned int nr_free_buffer_pages(void) -{ - return 65535; -} diff --git a/arch/lib/glue.c b/arch/lib/glue.c index bdbed913ee9e..11ce23998d62 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -18,12 +18,12 @@ #include #include #include +#include #include #include "sim-assert.h" #include "sim.h" #include "lib.h" - struct pipe_buffer; struct file; struct pipe_inode_info; @@ -31,10 +31,10 @@ struct wait_queue_t; struct kernel_param; struct super_block; +struct mm_struct init_mm; + /* defined in sched.c, used in net/sched/em_meta.c */ unsigned long avenrun[3]; -/* defined in mm/page_alloc.c */ -struct pglist_data __refdata contig_page_data; /* defined in linux/mmzone.h mm/memory.c */ struct page *mem_map = 0; /* used by sysinfo in kernel/timer.c */ @@ -59,6 +59,30 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd); static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); +/* memory.c */ +unsigned long highest_memmap_pfn __read_mostly; +unsigned long max_mapnr; + +/* + * Randomize the address space (stacks, mmaps, brk, etc.). + * + * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, + * as ancient (libc5 based) binaries can segfault. 
) + */ +int randomize_va_space __read_mostly = +#ifdef CONFIG_COMPAT_BRK + 1; +#else + 2; +#endif + +/* vmscan.c */ +unsigned long vm_total_pages; + +/* arm/mmu.c */ +pgprot_t pgprot_kernel; + + struct backing_dev_info noop_backing_dev_info = { .name = "noop", .capabilities = 0, @@ -282,3 +306,18 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) { } + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + lib_assert(false); + return 0; +} + +#ifdef CONFIG_HAVE_ARCH_PFN_VALID +int pfn_valid(unsigned long pfn) +{ + return memblock_is_memory(__pfn_to_phys(pfn)); +} +#endif diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h index 47adcc659b50..d7a40f4040c8 100644 --- a/arch/lib/include/asm/barrier.h +++ b/arch/lib/include/asm/barrier.h @@ -1,8 +1,88 @@ -#include +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ +#include + +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#if __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") +#endif + +#if __LINUX_ARM_ARCH__ >= 7 +#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") +#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") +#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") +#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory") +#elif defined(CONFIG_CPU_FA526) +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#else +#define isb(x) __asm__ __volatile__ ("" : : : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#define mb() do { dsb(); outer_sync(); } while (0) +#define rmb() dsb() +#define wmb() do { dsb(st); outer_sync(); } while (0) +#define dma_rmb() dmb(osh) +#define dma_wmb() dmb(oshst) +#else +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define dma_rmb() barrier() +#define dma_wmb() barrier() +#endif + +#ifndef CONFIG_SMP +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#else +#define smp_mb() dmb(ish) +#define smp_rmb() smp_mb() +#define smp_wmb() dmb(ishst) +#endif -#undef smp_store_release #define smp_store_release(p, v) \ - do { \ - smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ - } while (0) +do { \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + smp_mb(); \ + ___p1; \ +}) + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#define smp_store_mb(var, value) do { 
WRITE_ONCE(var, value); smp_mb(); } while (0) + +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/lib/include/asm/cache.h b/arch/lib/include/asm/cache.h new file mode 120000 index 000000000000..b186a8c20991 --- /dev/null +++ b/arch/lib/include/asm/cache.h @@ -0,0 +1 @@ +../../../arm/include/asm/cache.h \ No newline at end of file diff --git a/arch/lib/include/asm/cachetype.h b/arch/lib/include/asm/cachetype.h new file mode 120000 index 000000000000..5a58837efbbd --- /dev/null +++ b/arch/lib/include/asm/cachetype.h @@ -0,0 +1 @@ +../../../arm/include/asm/cachetype.h \ No newline at end of file diff --git a/arch/lib/include/asm/elf.h b/arch/lib/include/asm/elf.h deleted file mode 100644 index a7396c9d4225..000000000000 --- a/arch/lib/include/asm/elf.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _ASM_SIM_ELF_H -#define _ASM_SIM_ELF_H - -#if defined(CONFIG_64BIT) -#define ELF_CLASS ELFCLASS64 -#else -#define ELF_CLASS ELFCLASS32 -#endif - -#endif /* _ASM_SIM_ELF_H */ diff --git a/arch/lib/include/asm/elf.h b/arch/lib/include/asm/elf.h new file mode 120000 index 000000000000..55f48b91b219 --- /dev/null +++ b/arch/lib/include/asm/elf.h @@ -0,0 +1 @@ +../../../arm/include/asm/elf.h \ No newline at end of file diff --git a/arch/lib/include/asm/glue-proc.h b/arch/lib/include/asm/glue-proc.h new file mode 120000 index 000000000000..dd3e9c28772d --- /dev/null +++ b/arch/lib/include/asm/glue-proc.h @@ -0,0 +1 @@ +../../../arm/include/asm/glue-proc.h \ No newline at end of file diff --git a/arch/lib/include/asm/glue.h b/arch/lib/include/asm/glue.h new file mode 100644 index 000000000000..fbf71d75ec83 --- /dev/null +++ b/arch/lib/include/asm/glue.h @@ -0,0 +1,25 @@ +/* + * arch/arm/include/asm/glue.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file provides the glue to stick the processor-specific bits + * into the kernel in an efficient manner. The idea is to use branches + * when we're only targeting one class of TLB, or indirect calls + * when we're targeting multiple classes of TLBs. 
+ */ +#ifdef __KERNEL__ + +#ifdef __STDC__ +#define ____glue(name,fn) name##fn +#else +#define ____glue(name,fn) name/**/fn +#endif +#define __glue(name,fn) ____glue(name,fn) + +#endif diff --git a/arch/lib/include/asm/hardirq.h b/arch/lib/include/asm/hardirq.h deleted file mode 100644 index 47d47f95a793..000000000000 --- a/arch/lib/include/asm/hardirq.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_SIM_HARDIRQ_H -#define _ASM_SIM_HARDIRQ_H - -extern unsigned int interrupt_pending; - -#define local_softirq_pending() (interrupt_pending) - -#endif /* _ASM_SIM_HARDIRQ_H */ diff --git a/arch/lib/include/asm/hardirq.h b/arch/lib/include/asm/hardirq.h new file mode 120000 index 000000000000..09b5f09acbf1 --- /dev/null +++ b/arch/lib/include/asm/hardirq.h @@ -0,0 +1 @@ +../../../arm/include/asm/hardirq.h \ No newline at end of file diff --git a/arch/lib/include/asm/highmem.h b/arch/lib/include/asm/highmem.h new file mode 120000 index 000000000000..3b7bc556a3a5 --- /dev/null +++ b/arch/lib/include/asm/highmem.h @@ -0,0 +1 @@ +../../../arm/include/asm/highmem.h \ No newline at end of file diff --git a/arch/lib/include/asm/hwcap.h b/arch/lib/include/asm/hwcap.h new file mode 120000 index 000000000000..637fa7d2cc6b --- /dev/null +++ b/arch/lib/include/asm/hwcap.h @@ -0,0 +1 @@ +../../../arm/include/asm/hwcap.h \ No newline at end of file diff --git a/arch/lib/include/asm/kmap_types.h b/arch/lib/include/asm/kmap_types.h new file mode 120000 index 000000000000..c36c0bb637f1 --- /dev/null +++ b/arch/lib/include/asm/kmap_types.h @@ -0,0 +1 @@ +../../../arm/include/asm/kmap_types.h \ No newline at end of file diff --git a/arch/lib/include/asm/memory.h b/arch/lib/include/asm/memory.h new file mode 120000 index 000000000000..af1ba14762a0 --- /dev/null +++ b/arch/lib/include/asm/memory.h @@ -0,0 +1 @@ +../../../arm/include/asm/memory.h \ No newline at end of file diff --git a/arch/lib/include/asm/mmu.h b/arch/lib/include/asm/mmu.h new file mode 120000 index 000000000000..51afcdf84514 --- /dev/null +++ b/arch/lib/include/asm/mmu.h @@ -0,0 +1 @@ +../../../arm/include/asm/mmu.h \ No newline at end of file diff --git a/arch/lib/include/asm/outercache.h b/arch/lib/include/asm/outercache.h new file mode 120000 index 000000000000..eb3562b3e00a --- /dev/null +++ b/arch/lib/include/asm/outercache.h @@ -0,0 +1 @@ +../../../arm/include/asm/outercache.h \ No newline at end of file diff --git a/arch/lib/include/asm/page-arm.h b/arch/lib/include/asm/page-arm.h new file mode 120000 index 000000000000..db56b3d5b0cb --- /dev/null +++ b/arch/lib/include/asm/page-arm.h @@ -0,0 +1 @@ +../../../arm/include/asm/page.h \ No newline at end of file diff --git a/arch/lib/include/asm/page.h b/arch/lib/include/asm/page.h index 8c0aa7437374..faa4a5ed2266 100644 --- a/arch/lib/include/asm/page.h +++ b/arch/lib/include/asm/page.h @@ -1,14 +1,16 @@ -#ifndef _ASM_SIM_PAGE_H -#define _ASM_SIM_PAGE_H - -typedef struct {} pud_t; - -#define THREAD_ORDER 1 -#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) +/* + * arch/arm/include/asm/page.h + * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _ASM_LIB_PAGE_H +#define _ASM_LIB_PAGE_H #define WANT_PAGE_VIRTUAL 1 +#include -#include -#include - -#endif /* _ASM_SIM_PAGE_H */ +#endif diff --git a/arch/lib/include/asm/pgtable-2level-hwdef.h b/arch/lib/include/asm/pgtable-2level-hwdef.h new file mode 120000 index 000000000000..a3062387ce04 --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level-hwdef.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable-2level-hwdef.h \ No newline at end of file diff --git a/arch/lib/include/asm/pgtable-2level-types.h b/arch/lib/include/asm/pgtable-2level-types.h new file mode 120000 index 000000000000..41d09767ed1c --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level-types.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable-2level-types.h \ No newline at end of file diff --git a/arch/lib/include/asm/pgtable-2level.h b/arch/lib/include/asm/pgtable-2level.h new file mode 120000 index 000000000000..c95b6b679a30 --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable-2level.h \ No newline at end of file diff --git a/arch/lib/include/asm/pgtable-hwdef.h b/arch/lib/include/asm/pgtable-hwdef.h new file mode 120000 index 000000000000..fabe180c1494 --- /dev/null +++ b/arch/lib/include/asm/pgtable-hwdef.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable-hwdef.h \ No newline at end of file diff --git a/arch/lib/include/asm/pgtable.h b/arch/lib/include/asm/pgtable.h deleted file mode 100644 index ce599c852795..000000000000 --- a/arch/lib/include/asm/pgtable.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _ASM_SIM_PGTABLE_H -#define _ASM_SIM_PGTABLE_H - -#define PAGE_KERNEL ((pgprot_t) {0 }) - -#define arch_start_context_switch(prev) do {} while (0) - -#define kern_addr_valid(addr)(1) -#define pte_file(pte)(1) -/* Encode and de-code a swap entry */ -#define __swp_type(x) (((x).val >> 5) & 0x1f) -#define __swp_offset(x) ((x).val >> 11) -#define __swp_entry(type, offset) \ - ((swp_entry_t) {((type) << 5) | ((offset) << 11) }) -#define __pte_to_swp_entry(pte) ((swp_entry_t) {pte_val((pte)) }) -#define __swp_entry_to_pte(x) ((pte_t) {(x).val }) -#define pmd_page(pmd) (struct page *)(pmd_val(pmd) & PAGE_MASK) -#define pgtable_cache_init() do { } while (0) - -static inline int pte_swp_soft_dirty(pte_t pte) -{ - return 0; -} - -static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) -{ - return pte; -} - -#endif /* _ASM_SIM_PGTABLE_H */ diff --git a/arch/lib/include/asm/pgtable.h b/arch/lib/include/asm/pgtable.h new file mode 120000 index 000000000000..26b97b4ba905 --- /dev/null +++ b/arch/lib/include/asm/pgtable.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable.h \ No newline at end of file diff --git a/arch/lib/include/asm/proc-fns.h b/arch/lib/include/asm/proc-fns.h new file mode 120000 index 000000000000..18838a47591c --- /dev/null +++ b/arch/lib/include/asm/proc-fns.h @@ -0,0 +1 @@ +../../../arm/include/asm/proc-fns.h \ No newline at end of file diff --git a/arch/lib/include/asm/processor.h b/arch/lib/include/asm/processor.h index 4ac2e89127a0..15293c89e86b 100644 --- a/arch/lib/include/asm/processor.h +++ b/arch/lib/include/asm/processor.h @@ -7,9 +7,7 @@ struct thread_struct {}; #define cpu_relax_lowlatency() cpu_relax() #define KSTK_ESP(tsk) (0) -# define current_text_addr() ({ __label__ _l; _l: &&_l; }) - -#define TASK_SIZE ((~(long)0)) +#define current_text_addr() ({ __label__ _l; _l: &&_l; }) #define thread_saved_pc(x) (unsigned long)0 #define task_pt_regs(t) NULL diff --git a/arch/lib/include/asm/ptrace-arm.h b/arch/lib/include/asm/ptrace-arm.h new file 
mode 120000 index 000000000000..1e13abc0f023 --- /dev/null +++ b/arch/lib/include/asm/ptrace-arm.h @@ -0,0 +1 @@ +../../../arm/include/asm/ptrace.h \ No newline at end of file diff --git a/arch/lib/include/asm/ptrace.h b/arch/lib/include/asm/ptrace.h index ddd97080b759..46551c8fdda0 100644 --- a/arch/lib/include/asm/ptrace.h +++ b/arch/lib/include/asm/ptrace.h @@ -1,4 +1,6 @@ #ifndef _ASM_SIM_PTRACE_H #define _ASM_SIM_PTRACE_H +#include + #endif /* _ASM_SIM_PTRACE_H */ diff --git a/arch/lib/include/asm/thread_info.h b/arch/lib/include/asm/thread_info.h index ec316c613041..c8fc65efd435 100644 --- a/arch/lib/include/asm/thread_info.h +++ b/arch/lib/include/asm/thread_info.h @@ -33,4 +33,9 @@ static inline bool test_and_clear_restore_sigmask(void) return true; } + +#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +#define THREAD_START_SP (THREAD_SIZE - 8) + #endif /* _ASM_SIM_THREAD_INFO_H */ diff --git a/arch/lib/include/asm/tlbflush.h b/arch/lib/include/asm/tlbflush.h new file mode 120000 index 000000000000..03e072b029ab --- /dev/null +++ b/arch/lib/include/asm/tlbflush.h @@ -0,0 +1 @@ +../../../arm/include/asm/tlbflush.h \ No newline at end of file diff --git a/arch/lib/include/asm/user.h b/arch/lib/include/asm/user.h new file mode 120000 index 000000000000..36a6cda21e4a --- /dev/null +++ b/arch/lib/include/asm/user.h @@ -0,0 +1 @@ +../../../arm/include/asm/user.h \ No newline at end of file diff --git a/arch/lib/include/asm/vdso_datapage.h b/arch/lib/include/asm/vdso_datapage.h new file mode 120000 index 000000000000..46b4b5e75db8 --- /dev/null +++ b/arch/lib/include/asm/vdso_datapage.h @@ -0,0 +1 @@ +../../../arm/include/asm/vdso_datapage.h \ No newline at end of file diff --git a/arch/lib/include/sim.h b/arch/lib/include/sim.h index b30d7e878325..0cc54dbbde2d 100644 --- a/arch/lib/include/sim.h +++ b/arch/lib/include/sim.h @@ -14,6 +14,8 @@ #include "sim-types.h" +extern void init_memory_system(void); + /* API called from within linux kernel. Forwards to SimImported. 
*/ int lib_vprintf(const char *str, va_list args); void *lib_malloc(unsigned long size); diff --git a/arch/lib/include/uapi/asm/hwcap.h b/arch/lib/include/uapi/asm/hwcap.h new file mode 120000 index 000000000000..f4557b5629aa --- /dev/null +++ b/arch/lib/include/uapi/asm/hwcap.h @@ -0,0 +1 @@ +../../../../arm/include/uapi/asm/hwcap.h \ No newline at end of file diff --git a/arch/lib/include/uapi/asm/ptrace.h b/arch/lib/include/uapi/asm/ptrace.h new file mode 120000 index 000000000000..3eb86732ae4c --- /dev/null +++ b/arch/lib/include/uapi/asm/ptrace.h @@ -0,0 +1 @@ +../../../../arm/include/uapi/asm/ptrace.h \ No newline at end of file diff --git a/arch/lib/lib.c b/arch/lib/lib.c index 10879aecff6c..24c31c5f6f61 100644 --- a/arch/lib/lib.c +++ b/arch/lib/lib.c @@ -168,6 +168,8 @@ void lib_init(struct SimExported *exported, const struct SimImported *imported, pr_notice("%s", linux_banner); + init_memory_system(); + rcu_init(); /* in drivers/base/core.c (called normally by drivers/base/init.c) */ diff --git a/arch/lib/lib.h b/arch/lib/lib.h index abf2a2628bb2..a8be78ea69f9 100644 --- a/arch/lib/lib.h +++ b/arch/lib/lib.h @@ -18,4 +18,8 @@ struct SimTask { void *private; }; +#ifdef CONFIG_HAVE_ARCH_PFN_VALID +extern int memblock_is_memory(phys_addr_t addr); +#endif + #endif /* LIB_H */ diff --git a/arch/lib/softirq.c b/arch/lib/softirq.c index 3f6363a70317..88699a381562 100644 --- a/arch/lib/softirq.c +++ b/arch/lib/softirq.c @@ -11,6 +11,10 @@ #include "sim.h" #include "sim-assert.h" +#ifndef __ARCH_IRQ_STAT +irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; +EXPORT_SYMBOL(irq_stat); +#endif static struct softirq_action softirq_vec[NR_SOFTIRQS]; static struct SimTask *g_softirq_task = 0; diff --git a/arch/lib/sysctl.c b/arch/lib/sysctl.c index 5f08f9f97103..595fe0005651 100644 --- a/arch/lib/sysctl.c +++ b/arch/lib/sysctl.c @@ -11,9 +11,17 @@ #include #include #include +#include #include "sim-assert.h" #include "sim-types.h" +int mmap_min_addr_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + lib_assert(false); + return 0; +} + int drop_caches_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { @@ -93,9 +101,6 @@ int sched_rt_handler(struct ctl_table *table, int write, int sysctl_overcommit_memory = OVERCOMMIT_GUESS; int sysctl_overcommit_ratio = 50; -int sysctl_panic_on_oom = 0; -int sysctl_oom_dump_tasks = 0; -int sysctl_oom_kill_allocating_task = 0; int sysctl_nr_trim_pages = 0; int sysctl_drop_caches = 0; int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES - 1] = { 32 }; @@ -111,7 +116,10 @@ int dirty_background_ratio = 10; unsigned int dirty_expire_interval = 30 * 100; unsigned int dirty_writeback_interval = 5 * 100; unsigned long dirty_background_bytes = 0; -int percpu_pagelist_fraction = 0; + +int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; +unsigned long dac_mmap_min_addr = 4096; + int panic_timeout = 0; int panic_on_oops = 0; int printk_delay_msec = 0; @@ -122,7 +130,7 @@ DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); int pid_max = PID_MAX_DEFAULT; int pid_max_min = RESERVED_PIDS + 1; int pid_max_max = PID_MAX_LIMIT; -int min_free_kbytes = 1024; + int max_threads = 100; int laptop_mode = 0; diff --git a/mm/Makefile b/mm/Makefile index 4e2607fc1208..d68cc9a89aff 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -46,7 +46,7 @@ obj-$(CONFIG_NUMA) += mempolicy.o obj-$(CONFIG_SPARSEMEM) += sparse.o obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o obj-$(CONFIG_SLOB) += 
slob.o
-obj-$(CONFIG_SLIB) += slib.o
+obj-$(CONFIG_SLIB) += slib.o slib_env.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
diff --git a/mm/highmem.c b/mm/highmem.c
index 123bcd3ed4f2..5e72abe70642 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -34,6 +34,10 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx);
 #endif
 
+#ifdef CONFIG_LIB
+#define cache_is_vivt() 0
+#endif
+
 /*
  * Virtual_count is not a pure "count".
  *   0 means that it is not mapped, and has not been mapped
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 48aaf7b9f253..72de8d0689ac 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -68,6 +68,8 @@
 #include
 #include "internal.h"
 
+#include "slib_env.h"
+
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
@@ -111,7 +113,39 @@ EXPORT_SYMBOL(node_states);
 /* Protect totalram_pages and zone->managed_pages */
 static DEFINE_SPINLOCK(managed_page_count_lock);
 
+#ifndef CONFIG_LIB
 unsigned long totalram_pages __read_mostly;
+#endif
+
+static void print_buddy_freelist(void)
+{
+	struct zone *zone;
+	unsigned int order, t;
+	struct list_head *curr;
+	unsigned long pfn;
+	int i = 0;
+
+	for_each_zone(zone) {
+		pr_info("For zone %s %lu\n", zone->name,
+			zone->present_pages);
+		if (zone->present_pages == 0)
+			goto out;
+
+		for_each_migratetype_order(order, t) {
+			struct free_area *area = &zone->free_area[order];
+
+			list_for_each(curr, &area->free_list[t]) {
+				pfn = page_to_pfn(list_entry(curr,
+						struct page, lru));
+				pr_info("%lu %d %d %d\n", pfn, order, t, i);
+				i++;
+			}
+		}
+	}
+out:
+	pr_info("Total free pages: %d\n", i);
+}
+
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
 /*
@@ -123,6 +157,7 @@ unsigned long totalcma_pages __read_mostly;
 unsigned long dirty_balance_reserve __read_mostly;
 
 int percpu_pagelist_fraction;
+
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
 /*
@@ -186,6 +221,8 @@ int pageblock_order __read_mostly;
 
 static void __free_pages_ok(struct page *page, unsigned int order);
 
+
+#ifndef CONFIG_LIB
 /*
  * results with 256, 32 in the lowmem_reserve sysctl:
  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
@@ -209,6 +246,7 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 #endif
 	 32,
 };
+#endif
 
 EXPORT_SYMBOL(totalram_pages);
 
@@ -3268,6 +3306,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
 
+#ifdef CONFIG_LIB
+	page->virtual = (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT);
+#endif
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -3292,6 +3333,7 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 }
 EXPORT_SYMBOL(__get_free_pages);
 
+
 unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
@@ -3310,6 +3352,7 @@ void __free_pages(struct page *page, unsigned int order)
 
 EXPORT_SYMBOL(__free_pages);
 
+#ifndef CONFIG_LIB
 void free_pages(unsigned long addr, unsigned int order)
 {
 	if (addr != 0) {
@@ -3317,8 +3360,18 @@ void free_pages(unsigned long addr, unsigned int order)
 		__free_pages(virt_to_page((void *)addr), order);
 	}
 }
+EXPORT_SYMBOL(free_pages);
+#else
+void free_pages(unsigned long addr, unsigned int order)
+{
+	unsigned long pfn = addr - (unsigned long) total_ram;
+	pfn = pfn >> PAGE_SHIFT;
+	if (pfn != 0)
+		__free_pages(pfn_to_page(pfn), order);
+}
EXPORT_SYMBOL(free_pages); +#endif /* * Page Fragment: @@ -3578,10 +3631,17 @@ static unsigned long nr_free_zone_pages(int offset) * nr_free_buffer_pages() counts the number of pages which are beyond the high * watermark within ZONE_DMA and ZONE_NORMAL. */ +#ifndef CONFIG_LIB unsigned long nr_free_buffer_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_USER)); } +#else +unsigned long nr_free_buffer_pages(void) +{ + return 65535; +} +#endif EXPORT_SYMBOL_GPL(nr_free_buffer_pages); /** @@ -3601,6 +3661,7 @@ static inline void show_node(struct zone *zone) printk("Node %d ", zone_to_nid(zone)); } +#ifndef CONFIG_LIB void si_meminfo(struct sysinfo *val) { val->totalram = totalram_pages; @@ -3613,6 +3674,7 @@ void si_meminfo(struct sysinfo *val) } EXPORT_SYMBOL(si_meminfo); +#endif #ifdef CONFIG_NUMA void si_meminfo_node(struct sysinfo *val, int nid) @@ -5968,7 +6030,7 @@ void free_highmem_page(struct page *page) } #endif - +#ifndef CONFIG_LIB void __init mem_init_print_info(const char *str) { unsigned long physpages, codesize, datasize, rosize, bss_size; @@ -6021,6 +6083,7 @@ void __init mem_init_print_info(const char *str) #endif str ? ", " : "", str ? str : ""); } +#endif /** * set_dma_reserve - set the specified number of pages reserved in the first zone @@ -6316,14 +6379,18 @@ int __meminit init_per_zone_wmark_min(void) pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", new_min_free_kbytes, user_min_free_kbytes); } + +#ifndef CONFIG_LIB setup_per_zone_wmarks(); refresh_zone_stat_thresholds(); setup_per_zone_lowmem_reserve(); setup_per_zone_inactive_ratio(); +#endif return 0; } module_init(init_per_zone_wmark_min) +#ifndef CONFIG_LIB /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so * that we can call two helper functions whenever min_free_kbytes @@ -6344,6 +6411,7 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, } return 0; } +#endif #ifdef CONFIG_NUMA int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, @@ -6379,6 +6447,7 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, } #endif +#ifndef CONFIG_LIB /* * lowmem_reserve_ratio_sysctl_handler - just a wrapper around * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() @@ -6438,6 +6507,7 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, mutex_unlock(&pcp_batch_high_lock); return ret; } +#endif #ifdef CONFIG_NUMA int hashdist = HASHDIST_DEFAULT; @@ -6452,6 +6522,8 @@ static int __init set_hashdist(char *str) __setup("hashdist=", set_hashdist); #endif +#ifndef CONFIG_LIB + /* * allocate a large system hash table from bootmem * - it is assumed that the hash table must contain an exact power-of-2 @@ -6549,6 +6621,7 @@ void *__init alloc_large_system_hash(const char *tablename, return table; } +#endif /* Return a pointer to the bitmap storing bits affecting a block of pages */ static inline unsigned long *get_pageblock_bitmap(struct zone *zone, diff --git a/mm/slib.c b/mm/slib.c index 974c8aed0275..4c597f3bce02 100644 --- a/mm/slib.c +++ b/mm/slib.c @@ -113,62 +113,11 @@ void kmem_cache_free(struct kmem_cache *cache, void *p) kfree(p); } -struct page * -__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, - struct zonelist *zonelist, nodemask_t *nodemask) -{ - void *p; - struct page *page; - unsigned long pointer; - - /* typically, called from networking code by alloc_page or */ - /* directly with an order = 0. 
*/ - if (order) - return NULL; - p = lib_malloc(sizeof(struct page) + (1 << PAGE_SHIFT)); - page = (struct page *)p; - - atomic_set(&page->_count, 1); - page->flags = 0; - pointer = (unsigned long)page; - pointer += sizeof(struct page); - page->virtual = (void *)pointer; - return page; -} -void __free_pages(struct page *page, unsigned int order) -{ - /* typically, called from networking code by __free_page */ - lib_assert(order == 0); - lib_free(page); -} - void put_page(struct page *page) { if (atomic_dec_and_test(&page->_count)) lib_free(page); } -unsigned long get_zeroed_page(gfp_t gfp_mask) -{ - return __get_free_pages(gfp_mask | __GFP_ZERO, 0); -} - -void *alloc_pages_exact(size_t size, gfp_t gfp_mask) -{ - return alloc_pages(gfp_mask, get_order(size)); -} - -unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) -{ - int size = (1 << order) * PAGE_SIZE; - void *p = kmalloc(size, gfp_mask); - - return (unsigned long)p; -} -void free_pages(unsigned long addr, unsigned int order) -{ - if (addr != 0) - kfree((void *)addr); -} void *vmalloc(unsigned long size) { @@ -201,9 +150,3 @@ void free_percpu(void __percpu *ptr) { kfree(ptr); } -void *__alloc_bootmem_nopanic(unsigned long size, - unsigned long align, - unsigned long goal) -{ - return kzalloc(size, GFP_KERNEL); -} diff --git a/mm/slib_env.c b/mm/slib_env.c new file mode 100644 index 000000000000..0f253d110039 --- /dev/null +++ b/mm/slib_env.c @@ -0,0 +1,484 @@ +/* + * Library Slab Allocator (SLIB) + * + * Copyright (c) 2015 Yizheng Jiao + * + * Author: Yizheng Jiao + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "slib_env.h" +#include "sim.h" +#include "sim-assert.h" + +struct meminfo meminfo; +static void * __initdata vmalloc_min = + (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); + +phys_addr_t arm_lowmem_limit __initdata; + +unsigned int cacheid __read_mostly; + +static inline void +free_memmap(unsigned long start_pfn, unsigned long end_pfn) +{ + struct page *start_pg, *end_pg; + phys_addr_t pg, pgend; + + /* + * Convert start_pfn/end_pfn to a struct page pointer. + */ + start_pg = pfn_to_page(start_pfn - 1) + 1; + end_pg = pfn_to_page(end_pfn - 1) + 1; + + /* + * Convert to physical addresses, and + * round start upwards and end downwards. + */ + pg = PAGE_ALIGN(__pa(start_pg)); + pgend = __pa(end_pg) & PAGE_MASK; + + /* + * If there are free pages between these, + * free the section of the memmap array. + */ + if (pg < pgend) + memblock_free_early(pg, pgend - pg); +} + +/* + * The mem_map array can get very big. Free the unused area of the memory map. + */ +static void __init free_unused_memmap(void) +{ + unsigned long start, prev_end = 0; + struct memblock_region *reg; + + /* + * This relies on each bank being in address order. + * The banks are sorted previously in bootmem_init(). + */ + for_each_memblock(memory, reg) { + start = memblock_region_memory_base_pfn(reg); + + /* + * Align down here since the VM subsystem insists that the + * memmap entries are valid from the bank start aligned to + * MAX_ORDER_NR_PAGES. + */ + + start = round_down(start, MAX_ORDER_NR_PAGES); + + /* + * If we had a previous bank, and there is a space + * between the current bank and the previous, free it. + */ + if (prev_end && prev_end < start) + free_memmap(prev_end, start); + + /* + * Align up here since the VM subsystem insists that the + * memmap entries are valid from the bank end aligned to + * MAX_ORDER_NR_PAGES. 
+ */ + prev_end = ALIGN(memblock_region_memory_end_pfn(reg), + MAX_ORDER_NR_PAGES); + } +} + +#ifdef CONFIG_HIGHMEM +static inline void free_area_high(unsigned long pfn, unsigned long end) +{ + for (; pfn < end; pfn++) + free_highmem_page(pfn_to_page(pfn)); +} +#endif + +static void __init free_highpages(void) +{ +#ifdef CONFIG_HIGHMEM + unsigned long max_low = max_low_pfn; + struct memblock_region *mem, *res; + + /* set highmem page free */ + for_each_memblock(memory, mem) { + unsigned long start = memblock_region_memory_base_pfn(mem); + unsigned long end = memblock_region_memory_end_pfn(mem); + + /* Ignore complete lowmem entries */ + if (end <= max_low) + continue; + + /* Truncate partial highmem entries */ + if (start < max_low) + start = max_low; + + /* Find and exclude any reserved regions */ + for_each_memblock(reserved, res) { + unsigned long res_start, res_end; + + res_start = memblock_region_reserved_base_pfn(res); + res_end = memblock_region_reserved_end_pfn(res); + + if (res_end < start) + continue; + if (res_start < start) + res_start = start; + if (res_start > end) + res_start = end; + if (res_end > end) + res_end = end; + if (res_start != start) + free_area_high(start, res_start); + start = res_end; + if (start == end) + break; + } + + /* And now free anything which remains */ + if (start < end) + free_area_high(start, end); + } +#endif +} + +/* + * mem_init() marks the free areas in the mem_map and tells us how much + * memory is free. This is done after various parts of the system have + * claimed their memory after the kernel image. + */ +void __init mem_init(void) +{ + set_max_mapnr(pfn_to_page(max_pfn) - mem_map); + + /* this will put all unused low memory onto the freelists */ + free_unused_memmap(); + free_all_bootmem(); + free_highpages(); +} + +static void __init zone_sizes_init(unsigned long min, unsigned long max_low, + unsigned long max_high) +{ + unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; + struct memblock_region *reg; + + /* + * initialise the zones. + */ + memset(zone_size, 0, sizeof(zone_size)); + + /* + * The memory size has already been determined. If we need + * to do anything fancy with the allocation of this memory + * to the zones, now is the time to do it. + */ + zone_size[0] = max_low - min; +#ifdef CONFIG_HIGHMEM + zone_size[ZONE_HIGHMEM] = max_high - max_low; +#endif + + /* + * Calculate the size of the holes. + * holes = node_size - sum(bank_sizes) + */ + memcpy(zhole_size, zone_size, sizeof(zhole_size)); + for_each_memblock(memory, reg) { + unsigned long start = memblock_region_memory_base_pfn(reg); + unsigned long end = memblock_region_memory_end_pfn(reg); + + if (start < max_low) { + unsigned long low_end = min(end, max_low); + + zhole_size[0] -= low_end - start; + } +#ifdef CONFIG_HIGHMEM + if (end > max_low) { + unsigned long high_start = max(start, max_low); + + zhole_size[ZONE_HIGHMEM] -= end - high_start; + } +#endif + } + + free_area_init_node(0, zone_size, min, zhole_size); +} + +void __init arm_memblock_init(void) +{ + memblock_dump_all(); +} + +int __init arm_add_memory(u64 start, u64 size) +{ + u64 aligned_start; + + /* + * Ensure that start/size are aligned to a page boundary. + * Size is rounded down, start is rounded up. 
+ */ + aligned_start = PAGE_ALIGN(start); + if (aligned_start > start + size) + size = 0; + else + size -= aligned_start - start; + + if (aligned_start < PHYS_OFFSET) { + if (aligned_start + size <= PHYS_OFFSET) { + pr_info("Ignoring memory below PHYS_OFFSET1: 0x%08llx-0x%08llx\n", + aligned_start, aligned_start + size); + return -EINVAL; + } + + pr_info("Ignoring memory below PHYS_OFFSET2: 0x%08llx-0x%08llx\n", + aligned_start, (u64)PHYS_OFFSET); + + size -= PHYS_OFFSET - aligned_start; + aligned_start = PHYS_OFFSET; + } + + start = aligned_start; + size = size & ~(phys_addr_t)(PAGE_SIZE - 1); + + pr_info("[%s] start:%llu, size:%llu\n", __func__, start, size); + + /* + * Check whether this memory region has non-zero size or + * invalid node number. + */ + if (size == 0) + return -EINVAL; + + memblock_add(start, size); + return 0; +} + +static void __init find_limits(unsigned long *min, unsigned long *max_low, + unsigned long *max_high) +{ + *max_low = PFN_DOWN(memblock_get_current_limit()); + *min = PFN_UP(memblock_start_of_DRAM()); + *max_high = PFN_DOWN(memblock_end_of_DRAM()); +} + +static void __init arm_bootmem_init(unsigned long start_pfn, + unsigned long end_pfn) +{ + struct memblock_region *reg; + unsigned int boot_pages; + phys_addr_t bitmap; + pg_data_t *pgdat; + + /* + * Allocate the bootmem bitmap page. This must be in a region + * of memory which has already been mapped. + */ + boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); + + bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES, + __pfn_to_phys(end_pfn)); + + /* + * Initialise the bootmem allocator, handing the + * memory banks over to bootmem. + */ + node_set_online(0); + pgdat = NODE_DATA(0); + init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); + + /* Free the lowmem regions from memblock into bootmem. */ + for_each_memblock(memory, reg) { + unsigned long start = memblock_region_memory_base_pfn(reg); + unsigned long end = memblock_region_memory_end_pfn(reg); + + if (end >= end_pfn) + end = end_pfn; + if (start >= end) + break; + + free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT); + } + + /* Reserve the lowmem memblock reserved regions in bootmem. */ + for_each_memblock(reserved, reg) { + unsigned long start = memblock_region_reserved_base_pfn(reg); + unsigned long end = memblock_region_reserved_end_pfn(reg); + + if (end >= end_pfn) + end = end_pfn; + if (start >= end) + break; + reserve_bootmem(__pfn_to_phys(start), + (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); + } +} + +void __init bootmem_init(void) +{ + unsigned long min, max_low, max_high; + + find_limits(&min, &max_low, &max_high); + + pr_info("min:%lu\n", min); + pr_info("max_low:%lu\n", max_low); + pr_info("max_high:%lu\n", max_high); + + zone_sizes_init(min, max_low, max_high); + + /* + * This doesn't seem to be used by the Linux memory manager any + * more, but is used by ll_rw_block. If we can get rid of it, we + * also get rid of some of the stuff above as well. 
+ */ + min_low_pfn = min; + max_low_pfn = max_low; + max_pfn = max_high; +} + +void __init paging_init(void) +{ + bootmem_init(); +} + +void __init sanity_check_meminfo(void) +{ + phys_addr_t memblock_limit = 0; + int highmem = 0; + phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; + struct memblock_region *reg; + + for_each_memblock(memory, reg) { + phys_addr_t block_start = reg->base; + phys_addr_t block_end = reg->base + reg->size; + phys_addr_t size_limit = reg->size; + + if (reg->base >= vmalloc_limit) + highmem = 1; + else + size_limit = vmalloc_limit - reg->base; + + if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { + + if (highmem) { + pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", + &block_start, &block_end); + memblock_remove(reg->base, reg->size); + continue; + } + + if (reg->size > size_limit) { + phys_addr_t overlap_size = + reg->size - size_limit; + + pr_notice("Truncating RAM at %pa-%pa to -%pa", + &block_start, &block_end, &vmalloc_limit); + memblock_remove(vmalloc_limit, overlap_size); + block_end = vmalloc_limit; + } + } + + if (!highmem) { + if (block_end > arm_lowmem_limit) { + if (reg->size > size_limit) + arm_lowmem_limit = vmalloc_limit; + else + arm_lowmem_limit = block_end; + } + + /* + * Find the first non-section-aligned page, and point + * memblock_limit at it. This relies on rounding the + * limit down to be section-aligned, which happens at + * the end of this function. + * + * With this algorithm, the start or end of almost any + * bank can be non-section-aligned. The only exception + * is that the start of the bank 0 must be section- + * aligned, since otherwise memory would need to be + * allocated when mapping the start of bank 0, which + * occurs before any free memory is mapped. + */ + if (!memblock_limit) { + if (!IS_ALIGNED(block_start, SECTION_SIZE)) + memblock_limit = block_start; + else if (!IS_ALIGNED(block_end, SECTION_SIZE)) + memblock_limit = arm_lowmem_limit; + } + } + } + + high_memory = __va(arm_lowmem_limit - 1) + 1; + + /* + * Round the memblock limit down to a section size. This + * helps to ensure that we will allocate memory from the + * last full section, which should be mapped. 
+ */
+	if (memblock_limit)
+		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
+	memblock_set_current_limit(memblock_limit);
+}
+
+char *total_ram;
+
+void __init setup_arch(char **cmd)
+{
+	int ret;
+
+	ret = arm_add_memory(0, 1024 * 1024 * 1024 * 1);
+	if (ret)
+		pr_info("arm_add_memory failed in %s\n", __func__);
+
+	total_ram = lib_malloc(1024 * 1024 * 1024 * 1);
+	if (total_ram == NULL)
+		pr_info("Alloc memory failed in %s\n", __func__);
+
+	sanity_check_meminfo();
+	arm_memblock_init();
+	paging_init();
+}
+
+void *kmap_atomic(struct page *page)
+{
+	return (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT);
+}
+
+void __kunmap_atomic(void *kvaddr)
+{
+
+}
+
+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+	mem_init();
+}
+
+void __init init_memory_system(void)
+{
+	setup_arch(NULL);
+	page_alloc_init();
+	build_all_zonelists(NULL, NULL);
+	mm_init();
+}
+
+void test(void)
+{
+	pg_data_t *pgdat __maybe_unused = NODE_DATA(0);
+
+	alloc_pages(GFP_KERNEL, 1);
+}
diff --git a/mm/slib_env.h b/mm/slib_env.h
new file mode 100644
index 000000000000..6968b746eebc
--- /dev/null
+++ b/mm/slib_env.h
@@ -0,0 +1,54 @@
+/*
+ * Library Slab Allocator (SLIB)
+ *
+ * Copyright (c) 2015 Yizheng Jiao
+ *
+ * Author: Yizheng Jiao
+ */
+
+#ifndef SLIB_ENV_H
+#define SLIB_ENV_H
+
+#include
+#include
+
+/* From arm/include/asm/memory.h */
+#define __phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
+#define __pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
+
+/*
+ * Memory map description: from arm/include/asm/setup.h
+ */
+#ifdef CONFIG_ARM_NR_BANKS
+#define NR_BANKS	CONFIG_ARM_NR_BANKS
+#else
+#define NR_BANKS	16
+#endif
+
+struct membank {
+	phys_addr_t start;
+	unsigned long size;
+	unsigned int highmem;
+};
+
+struct meminfo {
+	int nr_banks;
+	struct membank bank[NR_BANKS];
+};
+
+extern struct meminfo meminfo;
+
+#define for_each_bank(iter, mi)	\
+	for (iter = 0; iter < (mi)->nr_banks; iter++)
+
+#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
+#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
+#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
+#define bank_phys_start(bank)	((bank)->start)
+#define bank_phys_end(bank)	((bank)->start + (bank)->size)
+#define bank_phys_size(bank)	((bank)->size)
+
+void __init init_memory_system(void);
+extern char *total_ram;
+
+#endif