Merge branch 'nommu' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'nommu' of master.kernel.org:/home/rmk/linux-2.6-arm:
  [ARM] nommu: backtrace code must not reference a discarded section
  [ARM] nommu: Initial uCLinux support for MMU-based CPUs
  [ARM] nommu: prevent Xscale-based machines being selected
  [ARM] nommu: export flush_dcache_page()
  [ARM] nommu: remove fault-armv, mmap and mm-armv files from nommu build
  [ARM] Remove TABLE_SIZE, and several unused function prototypes
  [ARM] nommu: Provide a simple flush_dcache_page implementation
  [ARM] nommu: add arch/arm/Kconfig-nommu to Kconfig files
  [ARM] nommu: add stubs for ioremap and friends
  [ARM] nommu: avoid selecting TLB and CPU specific copy code
  [ARM] nommu: uaccess tweaks
  [ARM] nommu: adjust headers for !MMU ARM systems
  [ARM] nommu: we need the TLS register emulation for nommu mode
Linus Torvalds 19 years ago
parent
commit
27d68a36c4

+ 9 - 0
arch/arm/Kconfig

@@ -188,23 +188,27 @@ config ARCH_IMX
 
 config ARCH_IOP3XX
 	bool "IOP3xx-based"
+	depends on MMU
 	select PCI
 	help
 	  Support for Intel's IOP3XX (XScale) family of processors.
 
 config ARCH_IXP4XX
 	bool "IXP4xx-based"
+	depends on MMU
 	help
 	  Support for Intel's IXP4XX (XScale) family of processors.
 
 config ARCH_IXP2000
 	bool "IXP2400/2800-based"
+	depends on MMU
 	select PCI
 	help
 	  Support for Intel's IXP2400/2800 (XScale) family of processors.
 
 config ARCH_IXP23XX
  	bool "IXP23XX-based"
+	depends on MMU
  	select PCI
 	help
 	  Support for Intel's IXP23xx (XScale) family of processors.
@@ -229,6 +233,7 @@ config ARCH_PNX4008
 
 config ARCH_PXA
 	bool "PXA2xx-based"
+	depends on MMU
 	select ARCH_MTD_XIP
 	help
 	  Support for Intel's PXA2XX processor line.
@@ -339,6 +344,10 @@ config XSCALE_PMU
 	depends on CPU_XSCALE && !XSCALE_PMU_TIMER
 	default y
 
+if !MMU
+source "arch/arm/Kconfig-nommu"
+endif
+
 endmenu
 
 source "arch/arm/common/Kconfig"

+ 5 - 2
arch/arm/kernel/armksyms.c

@@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
 	/* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
@@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
 EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
+#endif
 
 	/* crypto hash */
 EXPORT_SYMBOL(sha_transform);

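Why this regrouping is safe: on !MMU kernels the user-access helpers are replaced by inline memory operations (see the uaccess.h hunk at the end of this diff), so the assembly routines and their exports simply don't exist there. A minimal, hypothetical module fragment illustrating what the exports are for on MMU builds (demo_read() is invented):

#include <linux/module.h>
#include <asm/uaccess.h>

static ssize_t demo_read(char __user *buf)
{
	static const char msg[] = "hello";

	/* On CONFIG_MMU this expands to a call to the exported
	 * __copy_to_user assembly routine; on !MMU it becomes a
	 * plain memcpy() and needs no export. */
	if (copy_to_user(buf, msg, sizeof(msg)))
		return -EFAULT;
	return sizeof(msg);
}
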
+ 8 - 0
arch/arm/kernel/vmlinux.lds.S

@@ -80,6 +80,10 @@ SECTIONS
 		*(.exit.text)
 		*(.exit.data)
 		*(.exitcall.exit)
+#ifndef CONFIG_MMU
+		*(.fixup)
+		*(__ex_table)
+#endif
 	}
 
 	.text : {			/* Real text segment		*/
@@ -87,7 +91,9 @@ SECTIONS
 			*(.text)
 			SCHED_TEXT
 			LOCK_TEXT
+#ifdef CONFIG_MMU
 			*(.fixup)
+#endif
 			*(.gnu.warning)
 			*(.rodata)
 			*(.rodata.*)
@@ -142,7 +148,9 @@ SECTIONS
 		 */
 		. = ALIGN(32);
 		__start___ex_table = .;
+#ifdef CONFIG_MMU
 		*(__ex_table)
+#endif
 		__stop___ex_table = .;
 
 		/*

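Background, as a rough C sketch (simplified: the real lookup in arch/arm/mm/extable.c is a sorted search): each __ex_table entry pairs a potentially-faulting user-access instruction with a fixup address the fault handler can resume at. On !MMU targets user accesses cannot fault, so .fixup and __ex_table are discarded wholesale — which is also why the backtrace fixup two files below has to move out of the .fixup section.

struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address to resume execution at */
};

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

static unsigned long search_fixup(unsigned long pc)
{
	struct exception_table_entry *e;

	for (e = __start___ex_table; e < __stop___ex_table; e++)
		if (e->insn == pc)
			return e->fixup;
	return 0;	/* no entry: the fault is genuine */
}
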
+ 8 - 5
arch/arm/lib/Makefile

@@ -6,28 +6,31 @@
 
 lib-y		:= backtrace.o changebit.o csumipv6.o csumpartial.o   \
 		   csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
-		   copy_page.o delay.o findbit.o memchr.o memcpy.o    \
+		   delay.o findbit.o memchr.o memcpy.o		      \
 		   memmove.o memset.o memzero.o setbit.o              \
 		   strncpy_from_user.o strnlen_user.o                 \
 		   strchr.o strrchr.o                                 \
 		   testchangebit.o testclearbit.o testsetbit.o        \
-		   getuser.o putuser.o clear_user.o                   \
 		   ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
 		   ucmpdi2.o lib1funcs.o div64.o sha1.o               \
 		   io-readsb.o io-writesb.o io-readsl.o io-writesl.o
 
+mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
+
 # the code in uaccess.S is not preemption safe and
 # probably faster on ARMv3 only
 ifeq ($(CONFIG_PREEMPT),y)
-  lib-y	+= copy_from_user.o copy_to_user.o
+  mmu-y	+= copy_from_user.o copy_to_user.o
 else
 ifneq ($(CONFIG_CPU_32v3),y)
-  lib-y	+= copy_from_user.o copy_to_user.o
+  mmu-y	+= copy_from_user.o copy_to_user.o
 else
-  lib-y	+= uaccess.o
+  mmu-y	+= uaccess.o
 endif
 endif
 
+lib-$(CONFIG_MMU) += $(mmu-y)
+
 ifeq ($(CONFIG_CPU_32v3),y)
   lib-y	+= io-readsw-armv3.o io-writesw-armv3.o
 else

+ 1 - 4
arch/arm/lib/backtrace.S

@@ -97,16 +97,13 @@ ENTRY(c_backtrace)
 		b	1007f
 
 /*
- * Fixup for LDMDB
+ * Fixup for LDMDB.  Note that this must not be in the fixup section.
  */
-		.section .fixup,"ax"
-		.align	0
 1007:		ldr	r0, =.Lbad
 		mov	r1, frame
 		bl	printk
 		ldmfd	sp!, {r4 - r8, pc}
 		.ltorg
-		.previous
 		
 		.section __ex_table,"a"
 		.align	3

+ 35 - 32
arch/arm/mm/Kconfig

@@ -15,8 +15,8 @@ config CPU_ARM610
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V3
-	select CPU_TLB_V3
+	select CPU_COPY_V3 if MMU
+	select CPU_TLB_V3 if MMU
 	help
 	  The ARM610 is the successor to the ARM3 processor
 	  and was produced by VLSI Technology Inc.
@@ -31,8 +31,8 @@ config CPU_ARM710
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V3
-	select CPU_TLB_V3
+	select CPU_COPY_V3 if MMU
+	select CPU_TLB_V3 if MMU
 	help
 	  A 32-bit RISC microprocessor based on the ARM7 processor core
 	  designed by Advanced RISC Machines Ltd. The ARM710 is the
@@ -50,8 +50,8 @@ config CPU_ARM720T
 	select CPU_ABRT_LV4T
 	select CPU_CACHE_V4
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WT
-	select CPU_TLB_V4WT
+	select CPU_COPY_V4WT if MMU
+	select CPU_TLB_V4WT if MMU
 	help
 	  A 32-bit RISC processor with 8kByte Cache, Write Buffer and
 	  MMU built around an ARM7TDMI core.
@@ -68,8 +68,8 @@ config CPU_ARM920T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM920T is licensed to be produced by numerous vendors,
 	  and is used in the Maverick EP9312 and the Samsung S3C2410.
@@ -89,8 +89,8 @@ config CPU_ARM922T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM922T is a version of the ARM920T, but with smaller
 	  instruction and data caches. It is used in Altera's
@@ -108,8 +108,8 @@ config CPU_ARM925T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
  	help
  	  The ARM925T is a mix between the ARM920T and ARM926T, but with
 	  different instruction and data caches. It is used in TI's OMAP
@@ -126,8 +126,8 @@ config CPU_ARM926T
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  This is a variant of the ARM920.  It has slightly different
 	  instruction sequences for cache and TLB operations.  Curiously,
@@ -144,8 +144,8 @@ config CPU_ARM1020
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM1020 is the 32K cached version of the ARM10 processor,
 	  with an addition of a floating-point unit.
@@ -161,8 +161,8 @@ config CPU_ARM1020E
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	depends on n
 
 # ARM1022E
@@ -172,8 +172,8 @@ config CPU_ARM1022
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB # can probably do better
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU # can probably do better
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM1022E is an implementation of the ARMv5TE architecture
 	  based upon the ARM10 integer core with a 16KiB L1 Harvard cache,
@@ -189,8 +189,8 @@ config CPU_ARM1026
 	select CPU_32v5
 	select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB # can probably do better
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU # can probably do better
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture
 	  based upon the ARM10 integer core.
@@ -207,8 +207,8 @@ config CPU_SA110
 	select CPU_ABRT_EV4
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WB
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WB if MMU
 	help
 	  The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and
 	  is available at five speeds ranging from 100 MHz to 233 MHz.
@@ -227,7 +227,7 @@ config CPU_SA1100
 	select CPU_ABRT_EV4
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
-	select CPU_TLB_V4WB
+	select CPU_TLB_V4WB if MMU
 
 # XScale
 config CPU_XSCALE
@@ -237,7 +237,7 @@ config CPU_XSCALE
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
-	select CPU_TLB_V4WBI
+	select CPU_TLB_V4WBI if MMU
 
 # XScale Core Version 3
 config CPU_XSC3
@@ -247,7 +247,7 @@ config CPU_XSC3
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
-	select CPU_TLB_V4WBI
+	select CPU_TLB_V4WBI if MMU
 	select IO_36
 
 # ARMv6
@@ -258,8 +258,8 @@ config CPU_V6
 	select CPU_ABRT_EV6
 	select CPU_CACHE_V6
 	select CPU_CACHE_VIPT
-	select CPU_COPY_V6
-	select CPU_TLB_V6
+	select CPU_COPY_V6 if MMU
+	select CPU_TLB_V6 if MMU
 
 # ARMv6k
 config CPU_32v6K
@@ -277,17 +277,17 @@ config CPU_32v6K
 # This defines the compiler instruction set which depends on the machine type.
 config CPU_32v3
 	bool
-	select TLS_REG_EMUL if SMP
+	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 
 config CPU_32v4
 	bool
-	select TLS_REG_EMUL if SMP
+	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 
 config CPU_32v5
 	bool
-	select TLS_REG_EMUL if SMP
+	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 
 config CPU_32v6
@@ -334,6 +334,7 @@ config CPU_CACHE_VIVT
 config CPU_CACHE_VIPT
 	bool
 
+if MMU
 # The copy-page model
 config CPU_COPY_V3
 	bool
@@ -372,6 +373,8 @@ config CPU_TLB_V4WBI
 config CPU_TLB_V6
 	bool
 
+endif
+
 #
 # CPU supports 36-bit I/O
 #

+ 8 - 2
arch/arm/mm/Makefile

@@ -2,10 +2,16 @@
 # Makefile for the linux arm-specific parts of the memory manager.
 #
 
-obj-y				:= consistent.o extable.o fault-armv.o \
-				   fault.o flush.o init.o ioremap.o mmap.o \
+obj-y				:= consistent.o extable.o fault.o init.o \
+				   iomap.o
+
+obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
 				   mm-armv.o
 
+ifneq ($(CONFIG_MMU),y)
+obj-y				+= nommu.o
+endif
+
 obj-$(CONFIG_MODULES)		+= proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o

+ 0 - 2
arch/arm/mm/init.c

@@ -26,8 +26,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
-#define TABLE_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))
-
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

+ 55 - 0
arch/arm/mm/iomap.c

@@ -0,0 +1,55 @@
+/*
+ *  linux/arch/arm/mm/iomap.c
+ *
+ * Map IO port and PCI memory spaces so that {read,write}[bwl] can
+ * be used to access this memory.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+
+#ifdef __io
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return __io(port);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
+
+#ifdef CONFIG_PCI
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len   = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_CACHEABLE)
+			return ioremap(start, len);
+		return ioremap_nocache(start, len);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+	if ((unsigned long)addr >= VMALLOC_START &&
+	    (unsigned long)addr < VMALLOC_END)
+		iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+#endif

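A hypothetical usage sketch (demo_probe() and the 0x10 register offset are invented): pci_iomap() lets a driver stop caring whether BAR 0 is an I/O-port or a memory resource, since the cookie it returns works with read[bwl]/write[bwl] either way, exactly as the file's header comment promises.

#include <linux/pci.h>
#include <asm/io.h>

static int demo_probe(struct pci_dev *dev)
{
	void __iomem *regs;

	if (pci_enable_device(dev))
		return -ENODEV;

	regs = pci_iomap(dev, 0, 0);	/* maxlen 0: map the whole BAR */
	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x10);		/* hypothetical control register */

	pci_iounmap(dev, regs);
	return 0;
}
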
+ 0 - 47
arch/arm/mm/ioremap.c

@@ -176,50 +176,3 @@ void __iounmap(void __iomem *addr)
 	vunmap((void *)(PAGE_MASK & (unsigned long)addr));
 }
 EXPORT_SYMBOL(__iounmap);
-
-#ifdef __io
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
-	return __io(port);
-}
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(ioport_unmap);
-#endif
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#include <linux/ioport.h>
-
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len   = pci_resource_len(dev, bar);
-	unsigned long flags = pci_resource_flags(dev, bar);
-
-	if (!len || !start)
-		return NULL;
-	if (maxlen && len > maxlen)
-		len = maxlen;
-	if (flags & IORESOURCE_IO)
-		return ioport_map(start, len);
-	if (flags & IORESOURCE_MEM) {
-		if (flags & IORESOURCE_CACHEABLE)
-			return ioremap(start, len);
-		return ioremap_nocache(start, len);
-	}
-	return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
-	if ((unsigned long)addr >= VMALLOC_START &&
-	    (unsigned long)addr < VMALLOC_END)
-		iounmap(addr);
-}
-EXPORT_SYMBOL(pci_iounmap);
-#endif

+ 39 - 0
arch/arm/mm/nommu.c

@@ -0,0 +1,39 @@
+/*
+ *  linux/arch/arm/mm/nommu.c
+ *
+ * ARM uCLinux supporting functions.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+void flush_dcache_page(struct page *page)
+{
+	__cpuc_flush_dcache_page(page_address(page));
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void __iomem *__ioremap_pfn(unsigned long pfn, unsigned long offset,
+			    size_t size, unsigned long flags)
+{
+	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
+		return NULL;
+	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
+}
+EXPORT_SYMBOL(__ioremap_pfn);
+
+void __iomem *__ioremap(unsigned long phys_addr, size_t size,
+			unsigned long flags)
+{
+	return (void __iomem *)phys_addr;
+}
+EXPORT_SYMBOL(__ioremap);
+
+void __iounmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(__iounmap);

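With these stubs, ioremap() on a !MMU target is effectively the identity mapping, so portable driver code keeps working unchanged; a minimal sketch (the device address is invented):

#include <asm/io.h>

static void __iomem *map_regs(void)
{
	/* !MMU: __ioremap() above just returns the physical address.
	 * MMU builds instead set up page tables and hand back a
	 * vmalloc-area virtual address. */
	return ioremap(0xfff00000, 0x1000);
}
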
+ 9 - 0
arch/arm/mm/proc-arm1020.S

@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2000 ARM Limited
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -101,7 +102,9 @@ ENTRY(cpu_arm1020_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f 		@ ............wcam
 	bic	ip, ip, #0x1100 		@ ...i...s........
@@ -359,6 +362,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_arm1020_switch_mm)
+#ifdef CONFIG_MMU
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r3, c7, c10, 4
 	mov	r1, #0xF			@ 16 segments
@@ -383,6 +387,7 @@ ENTRY(cpu_arm1020_switch_mm)
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+#endif /* CONFIG_MMU */
 	mov	pc, lr
         
 /*
@@ -392,6 +397,7 @@ ENTRY(cpu_arm1020_switch_mm)
  */
 	.align	5
 ENTRY(cpu_arm1020_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -421,6 +427,7 @@ ENTRY(cpu_arm1020_set_pte)
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif /* CONFIG_MMU */
 	mov	pc, lr
 
 	__INIT
@@ -430,7 +437,9 @@ __arm1020_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm1020_cr1_clear
 	bic	r0, r0, r5

+ 9 - 0
arch/arm/mm/proc-arm1020e.S

@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2000 ARM Limited
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -101,7 +102,9 @@ ENTRY(cpu_arm1020e_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f 		@ ............wcam
 	bic	ip, ip, #0x1100 		@ ...i...s........
@@ -344,6 +347,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_arm1020e_switch_mm)
+#ifdef CONFIG_MMU
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r3, c7, c10, 4
 	mov	r1, #0xF			@ 16 segments
@@ -367,6 +371,7 @@ ENTRY(cpu_arm1020e_switch_mm)
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr
         
 /*
@@ -376,6 +381,7 @@ ENTRY(cpu_arm1020e_switch_mm)
  */
 	.align	5
 ENTRY(cpu_arm1020e_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -403,6 +409,7 @@ ENTRY(cpu_arm1020e_set_pte)
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
+#endif /* CONFIG_MMU */
 	mov	pc, lr
 
 	__INIT
@@ -412,7 +419,9 @@ __arm1020e_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm1020e_cr1_clear
 	bic	r0, r0, r5

+ 9 - 0
arch/arm/mm/proc-arm1022.S

@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2000 ARM Limited
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -90,7 +91,9 @@ ENTRY(cpu_arm1022_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f 		@ ............wcam
 	bic	ip, ip, #0x1100 		@ ...i...s........
@@ -333,6 +336,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_arm1022_switch_mm)
+#ifdef CONFIG_MMU
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 16 segments
 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -349,6 +353,7 @@ ENTRY(cpu_arm1022_switch_mm)
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr
         
 /*
@@ -358,6 +363,7 @@ ENTRY(cpu_arm1022_switch_mm)
  */
 	.align	5
 ENTRY(cpu_arm1022_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -385,6 +391,7 @@ ENTRY(cpu_arm1022_set_pte)
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
+#endif /* CONFIG_MMU */
 	mov	pc, lr
 
 	__INIT
@@ -394,7 +401,9 @@ __arm1022_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm1022_cr1_clear
 	bic	r0, r0, r5

+ 9 - 0
arch/arm/mm/proc-arm1026.S

@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2000 ARM Limited
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -90,7 +91,9 @@ ENTRY(cpu_arm1026_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f 		@ ............wcam
 	bic	ip, ip, #0x1100 		@ ...i...s........
@@ -327,6 +330,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_arm1026_switch_mm)
+#ifdef CONFIG_MMU
 	mov	r1, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 1:	mrc	p15, 0, r15, c7, c14, 3		@ test, clean, invalidate
@@ -338,6 +342,7 @@ ENTRY(cpu_arm1026_switch_mm)
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr
         
 /*
@@ -347,6 +352,7 @@ ENTRY(cpu_arm1026_switch_mm)
  */
 	.align	5
 ENTRY(cpu_arm1026_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -374,6 +380,7 @@ ENTRY(cpu_arm1026_set_pte)
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
+#endif /* CONFIG_MMU */
 	mov	pc, lr
 
 
@@ -384,8 +391,10 @@ __arm1026_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
 	mcr	p15, 0, r4, c2, c0		@ load page table pointer
+#endif
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mov	r0, #4				@ explicitly disable writeback
 	mcr	p15, 7, r0, c15, c0, 0

+ 15 - 0
arch/arm/mm/proc-arm6_7.S

@@ -2,6 +2,7 @@
  *  linux/arch/arm/mm/proc-arm6,7.S
  *
  *  Copyright (C) 1997-2000 Russell King
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -199,10 +200,12 @@ ENTRY(cpu_arm7_do_idle)
  */
 ENTRY(cpu_arm6_switch_mm)
 ENTRY(cpu_arm7_switch_mm)
+#ifdef CONFIG_MMU
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c0, 0		@ flush cache
 		mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
 		mcr	p15, 0, r1, c5, c0, 0		@ flush TLBs
+#endif
 		mov	pc, lr
 
 /*
@@ -214,6 +217,7 @@ ENTRY(cpu_arm7_switch_mm)
 		.align	5
 ENTRY(cpu_arm6_set_pte)
 ENTRY(cpu_arm7_set_pte)
+#ifdef CONFIG_MMU
 		str	r1, [r0], #-2048		@ linux version
 
 		eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -232,6 +236,7 @@ ENTRY(cpu_arm7_set_pte)
 		movne	r2, #0
 
 		str	r2, [r0]			@ hardware version
+#endif /* CONFIG_MMU */
 		mov	pc, lr
 
 /*
@@ -243,7 +248,9 @@ ENTRY(cpu_arm6_reset)
 ENTRY(cpu_arm7_reset)
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c0, 0		@ flush cache
+#ifdef CONFIG_MMU
 		mcr	p15, 0, r1, c5, c0, 0		@ flush TLB
+#endif
 		mov	r1, #0x30
 		mcr	p15, 0, r1, c1, c0, 0		@ turn off MMU etc
 		mov	pc, r0
@@ -253,19 +260,27 @@ ENTRY(cpu_arm7_reset)
 		.type	__arm6_setup, #function
 __arm6_setup:	mov	r0, #0
 		mcr	p15, 0, r0, c7, c0		@ flush caches on v3
+#ifdef CONFIG_MMU
 		mcr	p15, 0, r0, c5, c0		@ flush TLBs on v3
 		mov	r0, #0x3d			@ . ..RS BLDP WCAM
 		orr	r0, r0, #0x100			@ . ..01 0011 1101
+#else
+		mov	r0, #0x3c			@ . ..RS BLDP WCA.
+#endif
 		mov	pc, lr
 		.size	__arm6_setup, . - __arm6_setup
 
 		.type	__arm7_setup, #function
 __arm7_setup:	mov	r0, #0
 		mcr	p15, 0, r0, c7, c0		@ flush caches on v3
+#ifdef CONFIG_MMU
 		mcr	p15, 0, r0, c5, c0		@ flush TLBs on v3
 		mcr	p15, 0, r0, c3, c0		@ load domain access register
 		mov	r0, #0x7d			@ . ..RS BLDP WCAM
 		orr	r0, r0, #0x100			@ . ..01 0111 1101
+#else
+		mov	r0, #0x7c			@ . ..RS BLDP WCA.
+#endif
 		mov	pc, lr
 		.size	__arm7_setup, . - __arm7_setup
 

+ 12 - 0
arch/arm/mm/proc-arm720.S

@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Steve Hill (sjhill@cotw.com)
  *                     Rob Scott (rscott@mtrob.fdns.net)
  *  Copyright (C) 2000 ARM Limited, Deep Blue Solutions Ltd.
+ *  hacked for non-paged-MM by Hyok S. Choi, 2004.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -29,6 +30,7 @@
  *			out of 'proc-arm6,7.S' per RMK discussion
  *   07-25-2000 SJH	Added idle function.
  *   08-25-2000	DBS	Updated for integration of ARM Ltd version.
+ *   04-20-2004 HSC	modified for non-paged memory management mode.
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
@@ -75,10 +77,12 @@ ENTRY(cpu_arm720_do_idle)
  *	     the new.
  */
 ENTRY(cpu_arm720_switch_mm)
+#ifdef CONFIG_MMU
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c7, 0		@ invalidate cache
 		mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
 		mcr	p15, 0, r1, c8, c7, 0		@ flush TLB (v4)
+#endif
 		mov	pc, lr
 
 /*
@@ -89,6 +93,7 @@ ENTRY(cpu_arm720_switch_mm)
  */
 		.align	5
 ENTRY(cpu_arm720_set_pte)
+#ifdef CONFIG_MMU
 		str	r1, [r0], #-2048		@ linux version
 
 		eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -107,6 +112,7 @@ ENTRY(cpu_arm720_set_pte)
 		movne	r2, #0
 
 		str	r2, [r0]			@ hardware version
+#endif
 		mov	pc, lr
 
 /*
@@ -117,7 +123,9 @@ ENTRY(cpu_arm720_set_pte)
 ENTRY(cpu_arm720_reset)
 		mov	ip, #0
 		mcr	p15, 0, ip, c7, c7, 0		@ invalidate cache
+#ifdef CONFIG_MMU
 		mcr	p15, 0, ip, c8, c7, 0		@ flush TLB (v4)
+#endif
 		mrc	p15, 0, ip, c1, c0, 0		@ get ctrl register
 		bic	ip, ip, #0x000f			@ ............wcam
 		bic	ip, ip, #0x2100			@ ..v....s........
@@ -130,7 +138,9 @@ ENTRY(cpu_arm720_reset)
 __arm710_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ invalidate caches
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ flush TLB (v4)
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register
 	ldr	r5, arm710_cr1_clear
 	bic	r0, r0, r5
@@ -156,7 +166,9 @@ arm710_cr1_set:
 __arm720_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ invalidate caches
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ flush TLB (v4)
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register
 	ldr	r5, arm720_cr1_clear
 	bic	r0, r0, r5

+ 9 - 0
arch/arm/mm/proc-arm920.S

@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 1999,2000 ARM Limited
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -97,7 +98,9 @@ ENTRY(cpu_arm920_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
@@ -317,6 +320,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_arm920_switch_mm)
+#ifdef CONFIG_MMU
 	mov	ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
@@ -337,6 +341,7 @@ ENTRY(cpu_arm920_switch_mm)
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr
 
 /*
@@ -346,6 +351,7 @@ ENTRY(cpu_arm920_switch_mm)
  */
 	.align	5
 ENTRY(cpu_arm920_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -372,6 +378,7 @@ ENTRY(cpu_arm920_set_pte)
 	mov	r0, r0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif /* CONFIG_MMU */
 	mov	pc, lr
 
 	__INIT
@@ -381,7 +388,9 @@ __arm920_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm920_cr1_clear
 	bic	r0, r0, r5

+ 9 - 0
arch/arm/mm/proc-arm922.S

@@ -4,6 +4,7 @@
  *  Copyright (C) 1999,2000 ARM Limited
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
  *  Copyright (C) 2001 Altera Corporation
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -99,7 +100,9 @@ ENTRY(cpu_arm922_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
@@ -321,6 +324,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_arm922_switch_mm)
+#ifdef CONFIG_MMU
 	mov	ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
@@ -341,6 +345,7 @@ ENTRY(cpu_arm922_switch_mm)
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr
 
 /*
@@ -350,6 +355,7 @@ ENTRY(cpu_arm922_switch_mm)
  */
 	.align	5
 ENTRY(cpu_arm922_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -376,6 +382,7 @@ ENTRY(cpu_arm922_set_pte)
 	mov	r0, r0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif /* CONFIG_MMU */
 	mov	pc, lr
 
 	__INIT
@@ -385,7 +392,9 @@ __arm922_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm922_cr1_clear
 	bic	r0, r0, r5

+ 10 - 0
arch/arm/mm/proc-arm925.S

@@ -9,6 +9,8 @@
  *  Update for Linux-2.6 and cache flush improvements
  *  Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
  *
+ *  hacked for non-paged-MM by Hyok S. Choi, 2004.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -122,7 +124,9 @@ ENTRY(cpu_arm925_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
@@ -369,6 +373,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_arm925_switch_mm)
+#ifdef CONFIG_MMU
 	mov	ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
@@ -383,6 +388,7 @@ ENTRY(cpu_arm925_switch_mm)
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr
 
 /*
@@ -392,6 +398,7 @@ ENTRY(cpu_arm925_switch_mm)
  */
 	.align	5
 ENTRY(cpu_arm925_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -420,6 +427,7 @@ ENTRY(cpu_arm925_set_pte)
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif /* CONFIG_MMU */
 	mov	pc, lr
 
 	__INIT
@@ -438,7 +446,9 @@ __arm925_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mov	r0, #4				@ disable write-back on caches explicitly

+ 9 - 0
arch/arm/mm/proc-arm926.S

@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 1999-2001 ARM Limited
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -85,7 +86,9 @@ ENTRY(cpu_arm926_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
@@ -329,6 +332,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_arm926_switch_mm)
+#ifdef CONFIG_MMU
 	mov	ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
@@ -341,6 +345,7 @@ ENTRY(cpu_arm926_switch_mm)
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr
 
 /*
@@ -350,6 +355,7 @@ ENTRY(cpu_arm926_switch_mm)
  */
 	.align	5
 ENTRY(cpu_arm926_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -378,6 +384,7 @@ ENTRY(cpu_arm926_set_pte)
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif
 	mov	pc, lr
 
 	__INIT
@@ -387,7 +394,9 @@ __arm926_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 
 
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH

+ 11 - 0
arch/arm/mm/proc-sa110.S

@@ -2,6 +2,7 @@
  *  linux/arch/arm/mm/proc-sa110.S
  *
  *  Copyright (C) 1997-2002 Russell King
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -67,7 +68,9 @@ ENTRY(cpu_sa110_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
@@ -130,11 +133,15 @@ ENTRY(cpu_sa110_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_sa110_switch_mm)
+#ifdef CONFIG_MMU
 	str	lr, [sp, #-4]!
 	bl	v4wb_flush_kern_cache_all	@ clears IP
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
 	ldr	pc, [sp], #4
+#else
+	mov	pc, lr
+#endif
 
 /*
  * cpu_sa110_set_pte(ptep, pte)
@@ -143,6 +150,7 @@ ENTRY(cpu_sa110_switch_mm)
  */
 	.align	5
 ENTRY(cpu_sa110_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -164,6 +172,7 @@ ENTRY(cpu_sa110_set_pte)
 	mov	r0, r0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif
 	mov	pc, lr
 
 	__INIT
@@ -173,7 +182,9 @@ __sa110_setup:
 	mov	r10, #0
 	mcr	p15, 0, r10, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r10, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, sa110_cr1_clear
 	bic	r0, r0, r5

+ 11 - 0
arch/arm/mm/proc-sa1100.S

@@ -2,6 +2,7 @@
  *  linux/arch/arm/mm/proc-sa1100.S
  *
  *  Copyright (C) 1997-2002 Russell King
+ *  hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -77,7 +78,9 @@ ENTRY(cpu_sa1100_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
@@ -142,12 +145,16 @@ ENTRY(cpu_sa1100_dcache_clean_area)
  */
 	.align	5
 ENTRY(cpu_sa1100_switch_mm)
+#ifdef CONFIG_MMU
 	str	lr, [sp, #-4]!
 	bl	v4wb_flush_kern_cache_all	@ clears IP
 	mcr	p15, 0, ip, c9, c0, 0		@ invalidate RB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
 	ldr	pc, [sp], #4
+#else
+	mov	pc, lr
+#endif
 
 /*
  * cpu_sa1100_set_pte(ptep, pte)
@@ -156,6 +163,7 @@ ENTRY(cpu_sa1100_switch_mm)
  */
 	.align	5
 ENTRY(cpu_sa1100_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -177,6 +185,7 @@ ENTRY(cpu_sa1100_set_pte)
 	mov	r0, r0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif
 	mov	pc, lr
 
 	__INIT
@@ -186,7 +195,9 @@ __sa1100_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, sa1100_cr1_clear
 	bic	r0, r0, r5

+ 7 - 0
arch/arm/mm/proc-v6.S

@@ -2,6 +2,7 @@
  *  linux/arch/arm/mm/proc-v6.S
  *
  *  Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *  Modified by Catalin Marinas for noMMU support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -88,6 +89,7 @@ ENTRY(cpu_v6_dcache_clean_area)
  *	- we are not using split page tables
  */
 ENTRY(cpu_v6_switch_mm)
+#ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
 #ifdef CONFIG_SMP
@@ -97,6 +99,7 @@ ENTRY(cpu_v6_switch_mm)
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
+#endif
 	mov	pc, lr
 
 /*
@@ -119,6 +122,7 @@ ENTRY(cpu_v6_switch_mm)
  *	  1111   0   1   1	r/w	r/w
  */
 ENTRY(cpu_v6_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version
 
 	bic	r2, r1, #0x000003f0
@@ -145,6 +149,7 @@ ENTRY(cpu_v6_set_pte)
 
 	str	r2, [r0]
 	mcr	p15, 0, r0, c7, c10, 1 @ flush_pte
+#endif
 	mov	pc, lr
 
 
@@ -194,12 +199,14 @@ __v6_setup:
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	mcr	p15, 0, r0, c7, c15, 0		@ clean+invalidate cache
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
 #ifdef CONFIG_SMP
 	orr	r4, r4, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
 #endif
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
+#endif /* CONFIG_MMU */
 #ifdef CONFIG_VFP
 	mrc	p15, 0, r0, c1, c0, 2
 	orr	r0, r0, #(0xf << 20)

+ 4 - 0
include/asm-arm/bugs.h

@@ -10,8 +10,12 @@
 #ifndef __ASM_BUGS_H
 #define __ASM_BUGS_H
 
+#ifdef CONFIG_MMU
 extern void check_writebuffer_bugs(void);
 
 #define check_bugs() check_writebuffer_bugs()
+#else
+#define check_bugs() do { } while (0)
+#endif
 
 #endif

+ 7 - 0
include/asm-arm/domain.h

@@ -50,6 +50,8 @@
 #define domain_val(dom,type)	((type) << (2*(dom)))
 
 #ifndef __ASSEMBLY__
+
+#ifdef CONFIG_MMU
 #define set_domain(x)					\
 	do {						\
 	__asm__ __volatile__(				\
@@ -66,5 +68,10 @@
 	set_domain(thread->cpu_domain);				\
 	} while (0)
 
+#else
+#define set_domain(x)		do { } while (0)
+#define modify_domain(dom,type)	do { } while (0)
+#endif
+
 #endif
 #endif /* !__ASSEMBLY__ */

+ 4 - 5
include/asm-arm/mach/map.h

@@ -16,8 +16,6 @@ struct map_desc {
 	unsigned int type;
 };
 
-struct meminfo;
-
 #define MT_DEVICE		0
 #define MT_CACHECLEAN		1
 #define MT_MINICLEAN		2
@@ -28,7 +26,8 @@ struct meminfo;
 #define MT_IXP2000_DEVICE	7
 #define MT_NONSHARED_DEVICE	8
 
-extern void create_memmap_holes(struct meminfo *);
-extern void memtable_init(struct meminfo *);
+#ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
-extern void setup_io_desc(void);
+#else
+#define iotable_init(map,num)	do { } while (0)
+#endif

+ 57 - 18
include/asm-arm/memory.h

@@ -2,6 +2,7 @@
  *  linux/include/asm-arm/memory.h
  *
  *  Copyright (C) 2000-2002 Russell King
+ *  modification for nommu, Hyok S. Choi, 2004
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,8 @@
 #include <asm/arch/memory.h>
 #include <asm/sizes.h>
 
+#ifdef CONFIG_MMU
+
 #ifndef TASK_SIZE
 /*
  * TASK_SIZE - the maximum size of a user space task.
@@ -47,6 +50,60 @@
 #define PAGE_OFFSET		UL(0xc0000000)
 #endif
 
+/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#define MODULE_END		(PAGE_OFFSET)
+#define MODULE_START		(MODULE_END - 16*1048576)
+
+#if TASK_SIZE > MODULE_START
+#error Top of user space clashes with start of module space
+#endif
+
+/*
+ * The XIP kernel gets mapped at the bottom of the module vm area.
+ * Since we use sections to map it, this macro replaces the physical address
+ * with its virtual address while keeping offset from the base section.
+ */
+#define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
+
+#else /* CONFIG_MMU */
+
+/*
+ * The limitation of user task size can grow up to the end of free ram region.
+ * It is difficult to define and perhaps will never meet the original meaning
+ * of this define that was meant to.
+ * Fortunately, there is no reference for this in noMMU mode, for now.
+ */
+#ifndef TASK_SIZE
+#define TASK_SIZE		(CONFIG_DRAM_SIZE)
+#endif
+
+#ifndef TASK_UNMAPPED_BASE
+#define TASK_UNMAPPED_BASE	UL(0x00000000)
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET 		(CONFIG_DRAM_BASE)
+#endif
+
+#ifndef END_MEM
+#define END_MEM     		(CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#endif
+
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET		(PHYS_OFFSET)
+#endif
+
+/*
+ * The module can be at any place in ram in nommu mode.
+ */
+#define MODULE_END		(END_MEM)
+#define MODULE_START		(PHYS_OFFSET)
+
+#endif /* !CONFIG_MMU */
+
 /*
  * Size of DMA-consistent memory region.  Must be multiple of 2M,
  * between 2MB and 14MB inclusive.
@@ -71,24 +128,6 @@
 #define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
 #define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
 
-/*
- * The module space lives between the addresses given by TASK_SIZE
- * and PAGE_OFFSET - it must be within 32MB of the kernel text.
- */
-#define MODULE_END	(PAGE_OFFSET)
-#define MODULE_START	(MODULE_END - 16*1048576)
-
-#if TASK_SIZE > MODULE_START
-#error Top of user space clashes with start of module space
-#endif
-
-/*
- * The XIP kernel gets mapped at the bottom of the module vm area.
- * Since we use sections to map it, this macro replaces the physical address
- * with its virtual address while keeping offset from the base section.
- */
-#define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
-
 #ifndef __ASSEMBLY__
 
 /*

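A worked example of the new !MMU layout, assuming a hypothetical platform with CONFIG_DRAM_BASE = 0xc0000000 and CONFIG_DRAM_SIZE = 0x02000000 (32MB):

PHYS_OFFSET  = 0xc0000000
PAGE_OFFSET  = 0xc0000000	/* flat: virtual == physical */
END_MEM      = 0xc2000000	/* DRAM base + size */
TASK_SIZE    = 0x02000000
MODULE_START = 0xc0000000	/* modules can live anywhere in RAM */
MODULE_END   = 0xc2000000

So the module window covers all of RAM, and there is no fixed kernel/user split as on MMU builds.
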
+ 16 - 0
include/asm-arm/mmu.h

@@ -1,6 +1,8 @@
 #ifndef __ARM_MMU_H
 #define __ARM_MMU_H
 
+#ifdef CONFIG_MMU
+
 typedef struct {
 #if __LINUX_ARM_ARCH__ >= 6
 	unsigned int id;
@@ -13,4 +15,18 @@ typedef struct {
 #define ASID(mm)	(0)
 #endif
 
+#else
+
+/*
+ * From nommu.h:
+ *  Copyright (C) 2002, David McCullough <davidm@snapgear.com>
+ *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
+ */
+typedef struct {
+	struct vm_list_struct	*vmlist;
+	unsigned long		end_brk;
+} mm_context_t;
+
+#endif
+
 #endif

+ 2 - 0
include/asm-arm/mmu_context.h

@@ -82,6 +82,7 @@ static inline void
 switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	  struct task_struct *tsk)
 {
+#ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
 
 	if (prev != next) {
@@ -91,6 +92,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		if (cache_is_vivt())
 			cpu_clear(cpu, prev->cpu_vm_mask);
 	}
+#endif
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)

+ 51 - 0
include/asm-arm/page-nommu.h

@@ -0,0 +1,51 @@
+/*
+ *  linux/include/asm-arm/page-nommu.h
+ *
+ *  Copyright (C) 2004 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PAGE_NOMMU_H
+#define _ASMARM_PAGE_NOMMU_H
+
+#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
+#define KTHREAD_SIZE (8192)
+#else
+#define KTHREAD_SIZE PAGE_SIZE
+#endif
+ 
+#define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr)	free_page(addr)
+
+#define clear_page(page)	memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t[2];
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)      (x)
+#define pmd_val(x)      (x)
+#define pgd_val(x)	((x)[0])
+#define pgprot_val(x)   (x)
+
+#define __pte(x)        (x)
+#define __pmd(x)        (x)
+#define __pgprot(x)     (x)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif

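Quick sanity check of the PAGE_ALIGN() above, assuming PAGE_SHIFT = 12 (PAGE_SIZE = 0x1000, PAGE_MASK = ~0xfff):

PAGE_ALIGN(0x1000) == 0x1000	/* already aligned */
PAGE_ALIGN(0x1001) == 0x2000	/* rounds up to the next page */
PAGE_ALIGN(0x1fff) == 0x2000
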
+ 8 - 0
include/asm-arm/page.h

@@ -23,6 +23,12 @@
 
 #ifndef __ASSEMBLY__
 
+#ifndef CONFIG_MMU
+
+#include "page-nommu.h"
+
+#else
+
 #include <asm/glue.h>
 
 /*
@@ -171,6 +177,8 @@ typedef unsigned long pgprot_t;
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
 
+#endif /* CONFIG_MMU */
+
 #include <asm/memory.h>
 
 #endif /* !__ASSEMBLY__ */

+ 6 - 2
include/asm-arm/pgalloc.h

@@ -16,6 +16,10 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#define check_pgt_cache()		do { } while (0)
+
+#ifdef CONFIG_MMU
+
 #define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
 #define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
 
@@ -32,8 +36,6 @@ extern void free_pgd_slow(pgd_t *pgd);
 #define pgd_alloc(mm)			get_pgd_slow(mm)
 #define pgd_free(pgd)			free_pgd_slow(pgd)
 
-#define check_pgt_cache()		do { } while (0)
-
 /*
  * Allocate one PTE table.
  *
@@ -126,4 +128,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
 }
 
+#endif /* CONFIG_MMU */
+
 #endif

+ 123 - 0
include/asm-arm/pgtable-nommu.h

@@ -0,0 +1,123 @@
+/*
+ *  linux/include/asm-arm/pgtable-nommu.h
+ *
+ *  Copyright (C) 1995-2002 Russell King
+ *  Copyright (C) 2004  Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PGTABLE_NOMMU_H
+#define _ASMARM_PGTABLE_NOMMU_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/io.h>
+
+/*
+ * Trivial page table functions.
+ */
+#define pgd_present(pgd)	(1)
+#define pgd_none(pgd)		(0)
+#define pgd_bad(pgd)		(0)
+#define pgd_clear(pgdp)
+#define kern_addr_valid(addr)	(1)
+#define	pmd_offset(a, b)	((void *)0)
+/* FIXME */
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PGDIR_SHIFT		21
+
+#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+/* FIXME */
+
+#define PAGE_NONE	__pgprot(0)
+#define PAGE_SHARED	__pgprot(0)
+#define PAGE_COPY	__pgprot(0)
+#define PAGE_READONLY	__pgprot(0)
+#define PAGE_KERNEL	__pgprot(0)
+
+//extern void paging_init(struct meminfo *, struct machine_desc *);
+#define swapper_pg_dir ((pgd_t *) 0)
+
+#define __swp_type(x)		(0)
+#define __swp_offset(x)		(0)
+#define __swp_entry(typ,off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
+
+
+typedef pte_t *pte_addr_t;
+
+static inline int pte_file(pte_t pte) { return 0; }
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr)	(virt_to_page(0))
+
+/*
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+#define pgprot_noncached(prot)	__pgprot(0)
+#define pgprot_writecombine(prot) __pgprot(0)
+
+
+/*
+ * These would be in other places but having them here reduces the diffs.
+ */
+extern unsigned int kobjsize(const void *objp);
+extern int is_in_rom(unsigned long);
+
+/*
+ * No page table caches to initialise.
+ */
+#define pgtable_cache_init()	do { } while (0)
+#define io_remap_page_range	remap_page_range
+#define io_remap_pfn_range	remap_pfn_range
+
+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
+#define GET_IOSPACE(pfn)		0
+#define GET_PFN(pfn)			(pfn)
+
+
+/*
+ * All 32bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define	VMALLOC_START	0
+#define	VMALLOC_END	0xffffffff
+
+#define FIRST_USER_ADDRESS      (0)
+
+#else 
+
+/*
+ * dummy tlb and user structures.
+ */
+#define v3_tlb_fns	(0)
+#define v4_tlb_fns	(0)
+#define v4wb_tlb_fns	(0)
+#define v4wbi_tlb_fns	(0)
+#define v6_tlb_fns	(0)
+
+#define v3_user_fns	(0)
+#define v4_user_fns	(0)
+#define v4_mc_user_fns	(0)
+#define v4wb_user_fns	(0)
+#define v4wt_user_fns	(0)
+#define v6_user_fns	(0)
+#define xscale_mc_user_fns (0)
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _ASMARM_PGTABLE_H */

+ 9 - 1
include/asm-arm/pgtable.h

@@ -11,9 +11,15 @@
 #define _ASMARM_PGTABLE_H
 
 #include <asm-generic/4level-fixup.h>
+#include <asm/proc-fns.h>
+
+#ifndef CONFIG_MMU
+
+#include "pgtable-nommu.h"
+
+#else
 
 #include <asm/memory.h>
-#include <asm/proc-fns.h>
 #include <asm/arch/vmalloc.h>
 
 /*
@@ -378,4 +384,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #endif /* !__ASSEMBLY__ */
 
+#endif /* CONFIG_MMU */
+
 #endif /* _ASMARM_PGTABLE_H */

+ 4 - 0
include/asm-arm/proc-fns.h

@@ -165,6 +165,8 @@
 
 #include <asm/memory.h>
 
+#ifdef CONFIG_MMU
+
 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
 
 #define cpu_get_pgd()	\
@@ -176,6 +178,8 @@
 		(pgd_t *)phys_to_virt(pg);		\
 	})
 
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_PROCFNS_H */

+ 87 - 52
include/asm-arm/uaccess.h

@@ -40,16 +40,25 @@ struct exception_table_entry
 
 extern int fixup_exception(struct pt_regs *regs);
 
+/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
 /*
  * Note that this is actually 0x1,0000,0000
  */
 #define KERNEL_DS	0x00000000
-#define USER_DS		TASK_SIZE
-
 #define get_ds()	(KERNEL_DS)
+
+#ifdef CONFIG_MMU
+
+#define USER_DS		TASK_SIZE
 #define get_fs()	(current_thread_info()->addr_limit)
 
-static inline void set_fs (mm_segment_t fs)
+static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
@@ -75,8 +84,6 @@ static inline void set_fs (mm_segment_t fs)
 		: "cc"); \
 	flag; })
 
-#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
-
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
@@ -87,20 +94,10 @@ static inline void set_fs (mm_segment_t fs)
  * fixup code, but there are a few places where it intrudes on the
  * main code path.  When we only write to user space, there is no
  * problem.
- *
- * The "__xxx" versions of the user access functions do not verify the
- * address space - it must have been done previously with a separate
- * "access_ok()" call.
- *
- * The "xxx_error" versions set the third argument to EFAULT if an
- * error occurs, and leave it unchanged on success.  Note that these
- * versions are void (ie, don't return a value as such).
  */
-
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_bad(void);
 
 #define __get_user_x(__r2,__p,__e,__s,__i...)				\
 	   __asm__ __volatile__ (					\
@@ -131,6 +128,74 @@ extern int __get_user_bad(void);
 		__e;							\
 	})
 
+extern int __put_user_1(void *, unsigned int);
+extern int __put_user_2(void *, unsigned int);
+extern int __put_user_4(void *, unsigned int);
+extern int __put_user_8(void *, unsigned long long);
+
+#define __put_user_x(__r2,__p,__e,__s)					\
+	   __asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
+		"bl	__put_user_" #__s				\
+		: "=&r" (__e)						\
+		: "0" (__p), "r" (__r2)					\
+		: "ip", "lr", "cc")
+
+#define put_user(x,p)							\
+	({								\
+		const register typeof(*(p)) __r2 asm("r2") = (x);	\
+		const register typeof(*(p)) __user *__p asm("r0") = (p);\
+		register int __e asm("r0");				\
+		switch (sizeof(*(__p))) {				\
+		case 1:							\
+			__put_user_x(__r2, __p, __e, 1);		\
+			break;						\
+		case 2:							\
+			__put_user_x(__r2, __p, __e, 2);		\
+			break;						\
+		case 4:							\
+			__put_user_x(__r2, __p, __e, 4);		\
+			break;						\
+		case 8:							\
+			__put_user_x(__r2, __p, __e, 8);		\
+			break;						\
+		default: __e = __put_user_bad(); break;			\
+		}							\
+		__e;							\
+	})
+
+#else /* CONFIG_MMU */
+
+/*
+ * uClinux has only one addr space, so has simplified address limits.
+ */
+#define USER_DS			KERNEL_DS
+
+#define segment_eq(a,b)		(1)
+#define __addr_ok(addr)		(1)
+#define __range_ok(addr,size)	(0)
+#define get_fs()		(KERNEL_DS)
+
+static inline void set_fs(mm_segment_t fs)
+{
+}
+
+#define get_user(x,p)	__get_user(x,p)
+#define put_user(x,p)	__put_user(x,p)
+
+#endif /* CONFIG_MMU */
+
+#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success.  Note that these
+ * versions are void (ie, don't return a value as such).
+ */
 #define __get_user(x,ptr)						\
 ({									\
 	long __gu_err = 0;						\
@@ -212,43 +277,6 @@ do {									\
 	: "r" (addr), "i" (-EFAULT)				\
 	: "cc")
 
-extern int __put_user_1(void *, unsigned int);
-extern int __put_user_2(void *, unsigned int);
-extern int __put_user_4(void *, unsigned int);
-extern int __put_user_8(void *, unsigned long long);
-extern int __put_user_bad(void);
-
-#define __put_user_x(__r2,__p,__e,__s)					\
-	   __asm__ __volatile__ (					\
-		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
-		"bl	__put_user_" #__s				\
-		: "=&r" (__e)						\
-		: "0" (__p), "r" (__r2)					\
-		: "ip", "lr", "cc")
-
-#define put_user(x,p)							\
-	({								\
-		const register typeof(*(p)) __r2 asm("r2") = (x);	\
-		const register typeof(*(p)) __user *__p asm("r0") = (p);\
-		register int __e asm("r0");				\
-		switch (sizeof(*(__p))) {				\
-		case 1:							\
-			__put_user_x(__r2, __p, __e, 1);		\
-			break;						\
-		case 2:							\
-			__put_user_x(__r2, __p, __e, 2);		\
-			break;						\
-		case 4:							\
-			__put_user_x(__r2, __p, __e, 4);		\
-			break;						\
-		case 8:							\
-			__put_user_x(__r2, __p, __e, 8);		\
-			break;						\
-		default: __e = __put_user_bad(); break;			\
-		}							\
-		__e;							\
-	})
-
 #define __put_user(x,ptr)						\
 ({									\
 	long __pu_err = 0;						\
@@ -354,9 +382,16 @@ do {									\
 	: "cc")
 
 
+#ifdef CONFIG_MMU
 extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n);
 extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __clear_user(void __user *addr, unsigned long n);
+#else
+#define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
+#endif
+
 extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count);
 extern unsigned long __strnlen_user(const char __user *s, long n);
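
With the !MMU definitions above, user accesses collapse into ordinary loads, stores and memcpy()/memset() in the single flat address space. A hypothetical caller (get_val() is invented):

#include <asm/uaccess.h>

static int get_val(int __user *p, int *out)
{
	int v;

	/* !MMU: access_ok() is always true and get_user() is just
	 * __get_user(); with one flat address space the access should
	 * never fault, so this effectively always returns 0. */
	if (get_user(v, p))
		return -EFAULT;
	*out = v;
	return 0;
}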