author     Roman Yeryomin <roman@advem.lv>    2012-09-13 00:40:35 +0300
committer  Roman Yeryomin <roman@advem.lv>    2012-12-03 00:13:21 +0200
commit     5deb3317cb51ac52de922bb55f8492624018906d (patch)
tree       c2fbe6346699d9bb0f2100490c3029519bb8fde8 /target/linux/realtek/files/arch/rlx/mm
parent     0239d37124f9184b478a42de8a7fa1bc85a6a6fe (diff)
Add realtek target files
Signed-off-by: Roman Yeryomin <roman@advem.lv>
Diffstat (limited to 'target/linux/realtek/files/arch/rlx/mm')
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/Makefile          12
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/cache-rlx.c      432
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/cache.c          172
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/dma-default.c    334
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/extable.c         21
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/fault.c          248
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/highmem.c        130
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/imem-dmem.S      188
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/init.c           360
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/ioremap.c        191
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/page-rlx.c       299
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/pgtable-32.c      70
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/tlb-rlx.c        261
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/tlbex-fault.S     28
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/tlbex.c          449
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/uasm.c           545
-rw-r--r--  target/linux/realtek/files/arch/rlx/mm/uasm.h           166
17 files changed, 3906 insertions, 0 deletions
diff --git a/target/linux/realtek/files/arch/rlx/mm/Makefile b/target/linux/realtek/files/arch/rlx/mm/Makefile
new file mode 100644
index 000000000..1d98676f7
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Linux/MIPS-specific parts of the memory manager.
+#
+
+obj-y += cache.o dma-default.o extable.o fault.o \
+ init.o tlbex.o tlbex-fault.o uasm.o page-rlx.o
+
+obj-y += ioremap.o pgtable-32.o cache-rlx.o tlb-rlx.o imem-dmem.o
+
+obj-$(CONFIG_HIGHMEM) += highmem.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/target/linux/realtek/files/arch/rlx/mm/cache-rlx.c b/target/linux/realtek/files/arch/rlx/mm/cache-rlx.c
new file mode 100644
index 000000000..c183b95ee
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/cache-rlx.c
@@ -0,0 +1,432 @@
+/*
+ * cache-rlx.c: RLX specific mmu/cache code.
+ * Realtek Semiconductor Corp.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/wbflush.h>
+
+#include <asm/rlxbsp.h>
+
+/*
+ * Determine whether CPU has CACHE OP
+ */
+#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || \
+ defined(CONFIG_CPU_RLX4281) || defined(CONFIG_CPU_RLX5281)
+#define CONFIG_CPU_HAS_CACHE_OP
+#else
+#undef CONFIG_CPU_HAS_CACHE_OP
+#endif
+
+/*
+ * DCACHE part
+ */
+#if defined(CONFIG_CPU_HAS_WBC) || defined(CONFIG_CPU_HAS_L2C)
+static inline void rlx_dcache_flush_all(void)
+{
+ __asm__ __volatile__(
+ ".set\tpush\n"
+ ".set\tnoreorder\n"
+ "\tmtc0\t$0, $20\n"
+ "\tli\t$8, 0x200\n"
+ "\tmtc0\t$8, $20\n"
+ ".set\tpop\n");
+}
+
+__attribute__ ((section(".iram-gen")))
+static void rlx_dcache_flush_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_CPU_HAS_CACHE_OP
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ start &= ~cpu_dcache_line_mask;
+ size = end - start;
+ if (size >= cpu_dcache_size * 2)
+ {
+ rlx_dcache_flush_all();
+ return;
+ }
+
+ p = (char *)start;
+ flags = read_c0_status();
+
+ /* disable interrupt */
+ write_c0_status(flags & ~ST0_IEC);
+
+ /* 0x10 = IInval */
+ /* 0x11 = DInval */
+ /* 0x15 = DWBInval */
+ /* 0x19 = DWB */
+
+ for (i = 0; i < size; i += 0x080) {
+ asm (
+#if (cpu_dcache_line == 16)
+ "cache 0x15, 0x000(%0)\n\t"
+ "cache 0x15, 0x010(%0)\n\t"
+ "cache 0x15, 0x020(%0)\n\t"
+ "cache 0x15, 0x030(%0)\n\t"
+ "cache 0x15, 0x040(%0)\n\t"
+ "cache 0x15, 0x050(%0)\n\t"
+ "cache 0x15, 0x060(%0)\n\t"
+ "cache 0x15, 0x070(%0)\n\t"
+#else
+ "cache 0x15, 0x000(%0)\n\t"
+ "cache 0x15, 0x020(%0)\n\t"
+ "cache 0x15, 0x040(%0)\n\t"
+ "cache 0x15, 0x060(%0)\n\t"
+#endif
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ /* restore interrupt */
+ write_c0_status(flags);
+#else
+ rlx_dcache_flush_all();
+#endif
+}
+
+void rlx_dcache_wb_all(void)
+{
+ __asm__ __volatile__(
+ ".set\tpush\n"
+ ".set\tnoreorder\n"
+ "\tmtc0\t$0, $20\n"
+ "\tli\t$8, 0x100\n"
+ "\tmtc0\t$8, $20\n"
+ ".set\tpop\n");
+}
+
+static void rlx_dcache_wb_range(unsigned long start, unsigned long end)
+{
+ #ifdef CONFIG_CPU_HAS_CACHE_OP
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ start &= ~cpu_dcache_line_mask;
+ size = end - start;
+ if (size >= cpu_dcache_size * 2)
+ {
+ rlx_dcache_wb_all();
+ return;
+ }
+
+ p = (char *)start;
+ flags = read_c0_status();
+
+ /* disable interrupt */
+ write_c0_status(flags & ~ST0_IEC);
+
+ /* 0x10 = IInval */
+ /* 0x11 = DInval */
+ /* 0x15 = DWBInval */
+ /* 0x19 = DWB */
+ for (i = 0; i < size; i += 0x080) {
+ asm (
+#if (cpu_dcache_line == 16)
+ "cache 0x19, 0x000(%0)\n\t"
+ "cache 0x19, 0x010(%0)\n\t"
+ "cache 0x19, 0x020(%0)\n\t"
+ "cache 0x19, 0x030(%0)\n\t"
+ "cache 0x19, 0x040(%0)\n\t"
+ "cache 0x19, 0x050(%0)\n\t"
+ "cache 0x19, 0x060(%0)\n\t"
+ "cache 0x19, 0x070(%0)\n\t"
+#else
+ "cache 0x19, 0x000(%0)\n\t"
+ "cache 0x19, 0x020(%0)\n\t"
+ "cache 0x19, 0x040(%0)\n\t"
+ "cache 0x19, 0x060(%0)\n\t"
+#endif
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ /* restore interrupt */
+ write_c0_status(flags);
+ #else
+ rlx_dcache_wb_all();
+ #endif
+}
+#else /* not CONFIG_CPU_HAS_WBC and not CONFIG_CPU_HAS_L2C */
+static void rlx_dcache_flush_all(void)
+{
+ __asm__ __volatile__(
+ ".set\tpush\n"
+ ".set\tnoreorder\n"
+ "\tmtc0\t$0, $20\n"
+ "\tli\t$8, 0x1\n"
+ "\tmtc0\t$8, $20\n"
+ ".set\tpop\n");
+
+ return;
+}
+
+static void rlx_dcache_flush_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_CPU_HAS_CACHE_OP
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ start &= ~cpu_dcache_line_mask;
+ size = end - start;
+ if (size >= cpu_dcache_size * 2)
+ {
+ rlx_dcache_flush_all();
+ return;
+ }
+
+ p = (char *)start;
+ flags = read_c0_status();
+
+ /* disable interrupt */
+ write_c0_status(flags & ~ST0_IEC);
+
+ /* 0x10 = IInval */
+ /* 0x11 = DInval */
+ /* 0x15 = DWBInval */
+ /* 0x19 = DWB */
+ for (i = 0; i < size; i += 0x080) {
+ asm (
+#if (cpu_dcache_line == 16)
+ "cache 0x11, 0x000(%0)\n\t"
+ "cache 0x11, 0x010(%0)\n\t"
+ "cache 0x11, 0x020(%0)\n\t"
+ "cache 0x11, 0x030(%0)\n\t"
+ "cache 0x11, 0x040(%0)\n\t"
+ "cache 0x11, 0x050(%0)\n\t"
+ "cache 0x11, 0x060(%0)\n\t"
+ "cache 0x11, 0x070(%0)\n\t"
+#else
+ "cache 0x11, 0x000(%0)\n\t"
+ "cache 0x11, 0x020(%0)\n\t"
+ "cache 0x11, 0x040(%0)\n\t"
+ "cache 0x11, 0x060(%0)\n\t"
+#endif
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ /* restore interrupt */
+ write_c0_status(flags);
+#else
+ rlx_dcache_flush_all();
+#endif
+}
+
+void rlx_dcache_wb_all(void)
+{
+}
+
+static void rlx_dcache_wb_range(unsigned long start, unsigned long end)
+{
+}
+#endif /* CONFIG_CPU_HAS_WBC or CONFIG_CPU_HAS_L2C */
+
+/*
+ * ICACHE part
+ */
+static void rlx_icache_flush_all(void)
+{
+ __asm__ __volatile__(
+ ".set\tpush\n"
+ ".set\tnoreorder\n"
+ "\tmtc0\t$0, $20\n"
+ "\tli\t$8, 0x2\n"
+ "\tmtc0\t$8, $20\n"
+ ".set\tpop\n");
+}
+
+static void rlx_icache_flush_range(unsigned long start, unsigned long end)
+{
+#if defined(CONFIG_CPU_RLX4281) || defined(CONFIG_CPU_RLX5281)
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ rlx_dcache_wb_range(start, end);
+
+ start &= ~cpu_icache_line_mask;
+ size = end - start;
+ if (size >= cpu_icache_size * 2)
+ {
+ rlx_icache_flush_all();
+ return;
+ }
+
+ p = (char *)start;
+ flags = read_c0_status();
+
+ /* disable interrupt */
+ write_c0_status(flags & ~ST0_IEC);
+
+ /* 0x10 = IInval */
+ /* 0x11 = DInval */
+ /* 0x15 = DWBInval */
+ /* 0x19 = DWB */
+ for (i = 0; i < size; i += 0x080) {
+ asm (
+#if (cpu_icache_line == 16)
+ "cache 0x10, 0x000(%0)\n\t"
+ "cache 0x10, 0x010(%0)\n\t"
+ "cache 0x10, 0x020(%0)\n\t"
+ "cache 0x10, 0x030(%0)\n\t"
+ "cache 0x10, 0x040(%0)\n\t"
+ "cache 0x10, 0x050(%0)\n\t"
+ "cache 0x10, 0x060(%0)\n\t"
+ "cache 0x10, 0x070(%0)\n\t"
+#else
+ "cache 0x10, 0x000(%0)\n\t"
+ "cache 0x10, 0x020(%0)\n\t"
+ "cache 0x10, 0x040(%0)\n\t"
+ "cache 0x10, 0x060(%0)\n\t"
+#endif
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ /* restore interrupt */
+ write_c0_status(flags);
+#else
+ rlx_dcache_wb_range(start, end);
+ rlx_icache_flush_all();
+#endif
+}
+
+static inline void rlx_cache_flush_all(void)
+{
+}
+
+static inline void __rlx_cache_flush_all(void)
+{
+ rlx_dcache_flush_all();
+ rlx_icache_flush_all();
+}
+
+static void rlx_cache_flush_mm(struct mm_struct *mm)
+{
+}
+
+static void rlx_cache_flush_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static void rlx_cache_flush_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
+ int exec = vma->vm_flags & VM_EXEC;
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pr_debug("cpage[%08lx,%08lx]\n",
+ cpu_context(smp_processor_id(), mm), addr);
+
+ /* No ASID => no such page in the cache. */
+ if (cpu_context(smp_processor_id(), mm) == 0)
+ return;
+
+ pgdp = pgd_offset(mm, addr);
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
+ ptep = pte_offset(pmdp, addr);
+
+ /* Invalid => no such page in the cache. */
+ if (!(pte_val(*ptep) & _PAGE_PRESENT))
+ return;
+
+ rlx_dcache_flush_range(kaddr, kaddr + PAGE_SIZE);
+ if (exec)
+ rlx_icache_flush_range(kaddr, kaddr + PAGE_SIZE);
+}
+
+static void local_rlx_dcache_flush_page(void *addr)
+{
+}
+
+static void rlx_dcache_flush_page(unsigned long addr)
+{
+}
+
+static void rlx_cache_flush_sigtramp(unsigned long addr)
+{
+ unsigned long flags;
+
+ pr_debug("csigtramp[%08lx]\n", addr);
+
+ flags = read_c0_status();
+
+ /* disable interrupt */
+ write_c0_status(flags & ~ST0_IEC);
+
+#if defined(CONFIG_CPU_HAS_WBC) || defined(CONFIG_CPU_HAS_L2C)
+ #ifndef CONFIG_CPU_HAS_CACHE_OP
+ rlx_dcache_flush_all();
+ #else
+ asm ( "cache\t0x19, 0x000(%0)\n\t" : : "r" (addr) );
+ #endif
+#endif
+
+#if defined(CONFIG_CPU_RLX4281) || defined(CONFIG_CPU_RLX5281)
+ asm ( "cache\t0x10, 0x000(%0)\n\t" : : "r" (addr) );
+#else
+ rlx_icache_flush_all();
+#endif
+
+ /* restore interrupt */
+ write_c0_status(flags);
+}
+
+static void rlx_dma_cache_wback_inv(unsigned long start, unsigned long size)
+{
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ iob();
+ rlx_dcache_flush_range(start, start + size);
+}
+
+void __cpuinit rlx_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+
+ flush_cache_all = rlx_cache_flush_all;
+ __flush_cache_all = __rlx_cache_flush_all;
+ flush_cache_mm = rlx_cache_flush_mm;
+ flush_cache_range = rlx_cache_flush_range;
+ flush_cache_page = rlx_cache_flush_page;
+ flush_icache_range = rlx_icache_flush_range;
+ local_flush_icache_range = rlx_icache_flush_range;
+ local_flush_data_cache_page = local_rlx_dcache_flush_page;
+ flush_data_cache_page = rlx_dcache_flush_page;
+ flush_cache_sigtramp = rlx_cache_flush_sigtramp;
+
+ _dma_cache_wback_inv = rlx_dma_cache_wback_inv;
+ _dma_cache_wback = rlx_dma_cache_wback_inv;
+ _dma_cache_inv = rlx_dma_cache_wback_inv;
+
+ printk("icache: %dkB/%dB, dcache: %dkB/%dB, scache: %dkB/%dB\n",
+ cpu_icache_size >> 10, cpu_icache_line,
+ cpu_dcache_size >> 10, cpu_dcache_line,
+ cpu_scache_size >> 10, cpu_scache_line);
+
+ build_clear_page();
+ build_copy_page();
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/cache.c b/target/linux/realtek/files/arch/rlx/mm/cache.c
new file mode 100644
index 000000000..26f90e5ea
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/cache.c
@@ -0,0 +1,172 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ */
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+
+/* Cache operations. */
+void (*flush_cache_all)(void);
+void (*__flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
+ unsigned long pfn);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+
+void (*__flush_cache_vmap)(void);
+void (*__flush_cache_vunmap)(void);
+
+/* MIPS specific cache operations */
+void (*flush_cache_sigtramp)(unsigned long addr);
+void (*local_flush_data_cache_page)(void * addr);
+void (*flush_data_cache_page)(unsigned long addr);
+void (*flush_icache_all)(void);
+
+EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
+EXPORT_SYMBOL(flush_data_cache_page);
+
+#ifdef CONFIG_DMA_NONCOHERENT
+
+/* DMA cache operations. */
+void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
+void (*_dma_cache_wback)(unsigned long start, unsigned long size);
+void (*_dma_cache_inv)(unsigned long start, unsigned long size);
+
+EXPORT_SYMBOL(_dma_cache_wback_inv);
+
+#endif /* CONFIG_DMA_NONCOHERENT */
+
+/*
+ * We could optimize the case where the cache argument is not BCACHE but
+ * that seems very atypical use ...
+ */
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
+ unsigned int, cache)
+{
+ if (bytes == 0)
+ return 0;
+ if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
+ return -EFAULT;
+
+ flush_icache_range(addr, addr + bytes);
+
+ return 0;
+}
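
The sys_cacheflush() handler above is what the MIPS userspace cacheflush() wrapper ends up calling. A minimal usage sketch (illustrative, not part of the patch), assuming the <sys/cachectl.h> wrapper and its BCACHE constant as found on the glibc/uClibc MIPS ports:

    #include <sys/cachectl.h>

    /* After writing or patching instructions in 'buf', push them out so the
     * CPU fetches the new code; the handler above flushes the icache range
     * for any non-zero length regardless of the 'cache' argument. */
    static int sync_new_code(void *buf, int len)
    {
            return cacheflush(buf, len, BCACHE);
    }
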
+
+void __flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ unsigned long addr;
+
+ if (PageHighMem(page))
+ return;
+ if (mapping && !mapping_mapped(mapping)) {
+ SetPageDcacheDirty(page);
+ return;
+ }
+
+ /*
+ * We could delay the flush for the !page_mapping case too. But that
+ * case is for exec env/arg pages and those are 99% certain to get
+ * faulted into the tlb (and thus flushed) anyway.
+ */
+ addr = (unsigned long) page_address(page);
+ flush_data_cache_page(addr);
+}
+
+EXPORT_SYMBOL(__flush_dcache_page);
+
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+ unsigned long addr = (unsigned long) page_address(page);
+
+ if (pages_do_alias(addr, vmaddr)) {
+ if (page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *kaddr;
+
+ kaddr = kmap_coherent(page, vmaddr);
+ flush_data_cache_page((unsigned long)kaddr);
+ kunmap_coherent();
+ } else
+ flush_data_cache_page(addr);
+ }
+}
+
+EXPORT_SYMBOL(__flush_anon_page);
+
+void __update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte)
+{
+ struct page *page;
+ unsigned long pfn, addr;
+ int exec = (vma->vm_flags & VM_EXEC);
+
+ pfn = pte_pfn(pte);
+ if (unlikely(!pfn_valid(pfn)))
+ return;
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && Page_dcache_dirty(page)) {
+ addr = (unsigned long) page_address(page);
+ if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ flush_data_cache_page(addr);
+ ClearPageDcacheDirty(page);
+ }
+}
+
+unsigned long _page_cachable_default;
+EXPORT_SYMBOL_GPL(_page_cachable_default);
+
+static inline void setup_protection_map(void)
+{
+ protection_map[0] = PAGE_NONE;
+ protection_map[1] = PAGE_READONLY;
+ protection_map[2] = PAGE_COPY;
+ protection_map[3] = PAGE_COPY;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+ protection_map[9] = PAGE_READONLY;
+ protection_map[10] = PAGE_SHARED;
+ protection_map[11] = PAGE_SHARED;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+ protection_map[15] = PAGE_SHARED;
+}
+
+int __weak __uncached_access(struct file *file, unsigned long addr)
+{
+ if (file->f_flags & O_SYNC)
+ return 1;
+
+ return addr >= __pa(high_memory);
+}
+
+void __devinit cpu_cache_init(void)
+{
+ extern void __weak rlx_cache_init(void);
+
+ rlx_cache_init();
+ setup_protection_map();
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/dma-default.c b/target/linux/realtek/files/arch/rlx/mm/dma-default.c
new file mode 100644
index 000000000..9fe69f2fb
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/dma-default.c
@@ -0,0 +1,334 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+
+#include <dma-coherence.h>
+
+static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
+{
+ unsigned long addr = plat_dma_addr_to_phys(dma_addr);
+
+ return (unsigned long)phys_to_virt(addr);
+}
+
+/*
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ */
+
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+ if (dev == NULL)
+ gfp |= __GFP_DMA;
+ else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+ gfp |= __GFP_DMA;
+ else
+ ;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+ else
+ ;
+#endif
+
+ /* Don't invoke OOM killer */
+ gfp |= __GFP_NORETRY;
+
+ return gfp;
+}
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle, gfp_t gfp)
+{
+ void *ret;
+
+ gfp = massage_gfp_flags(dev, gfp);
+
+ ret = (void *) __get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = plat_map_dma_mem(dev, ret, size);
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(dma_alloc_noncoherent);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle, gfp_t gfp)
+{
+ void *ret;
+
+ gfp = massage_gfp_flags(dev, gfp);
+
+ ret = (void *) __get_free_pages(gfp, get_order(size));
+
+ if (ret)
+ {
+ memset(ret, 0, size);
+ *dma_handle = plat_map_dma_mem(dev, ret, size);
+
+ dma_cache_wback_inv((unsigned long) ret, size);
+ ret = UNCAC_ADDR(ret);
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ plat_unmap_dma_mem(dev, dma_handle);
+ free_pages((unsigned long) vaddr, get_order(size));
+}
+
+EXPORT_SYMBOL(dma_free_noncoherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ unsigned long addr = (unsigned long) vaddr;
+
+ plat_unmap_dma_mem(dev, dma_handle);
+ addr = CAC_ADDR(addr);
+
+ free_pages(addr, get_order(size));
+}
+
+EXPORT_SYMBOL(dma_free_coherent);
+
+static inline void __dma_sync(unsigned long addr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback(addr, size);
+ break;
+
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(addr, size);
+ break;
+
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(addr, size);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+{
+ unsigned long addr = (unsigned long) ptr;
+
+ __dma_sync(addr, size, direction);
+
+ return plat_map_dma_mem(dev, ptr, size);
+}
+
+EXPORT_SYMBOL(dma_map_single);
+
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ plat_unmap_dma_mem(dev, dma_addr);
+}
+
+EXPORT_SYMBOL(dma_unmap_single);
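
For context (not part of the patch), a sketch of how a driver would normally pair these streaming helpers; dev, buf and len are placeholder names:

    /* Hand a CPU-filled buffer to the device, then tear the mapping down
     * once the transfer has completed. dma_map_single() does the cache
     * writeback via __dma_sync() above; unmap is a no-op cache-wise here. */
    dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, bus))
            return -EIO;
    /* ... program 'bus' into a descriptor and start the DMA ... */
    dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
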
+
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for (i = 0; i < nents; i++, sg++)
+ {
+ unsigned long addr;
+
+ addr = (unsigned long) sg_virt(sg);
+ if (addr)
+ __dma_sync(addr, sg->length, direction);
+
+ sg->dma_address = plat_map_dma_mem(dev, (void *)addr, sg->length);
+ }
+
+ return nents;
+}
+
+EXPORT_SYMBOL(dma_map_sg);
+
+dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ unsigned long addr;
+
+ BUG_ON(direction == DMA_NONE);
+
+ addr = (unsigned long) page_address(page) + offset;
+ __dma_sync(addr, size, direction);
+
+ return plat_map_dma_mem_page(dev, page) + offset;
+}
+
+EXPORT_SYMBOL(dma_map_page);
+
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ unsigned long addr;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for (i = 0; i < nhwentries; i++, sg++)
+ {
+ if (direction != DMA_TO_DEVICE)
+ {
+ addr = (unsigned long) sg_virt(sg);
+ if (addr)
+ __dma_sync(addr, sg->length, direction);
+ }
+
+ plat_unmap_dma_mem(dev, sg->dma_address);
+ }
+}
+
+EXPORT_SYMBOL(dma_unmap_sg);
+
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ #ifdef CONFIG_ARCH_CPU_RLX5281
+ __asm__ __volatile__(
+ ".set\tpush\n"
+ ".set\tnoreorder\n"
+ "\tmtc0\t$0, $20\n"
+ "\tli\t$8, 0x200\n"
+ "\tmtc0\t$8, $20\n"
+ ".set\tpop\n");
+ #endif
+}
+
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ unsigned long addr;
+
+ BUG_ON(direction == DMA_NONE);
+
+ plat_extra_sync_for_device(dev);
+ addr = dma_addr_to_virt(dma_handle);
+ __dma_sync(addr, size, direction);
+}
+
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
+
+void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ unsigned long addr;
+ BUG_ON(direction == DMA_NONE);
+
+ plat_extra_sync_for_device(dev);
+ addr = dma_addr_to_virt(dma_handle);
+ __dma_sync(addr + offset, size, direction);
+}
+
+EXPORT_SYMBOL(dma_sync_single_range_for_device);
+
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ /* Make sure that gcc doesn't leave the empty loop body. */
+ for (i = 0; i < nelems; i++, sg++)
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
+ sg->length, direction);
+}
+
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return plat_dma_mapping_error(dev, dma_addr);
+}
+
+EXPORT_SYMBOL(dma_mapping_error);
+
+int dma_supported(struct device *dev, u64 mask)
+{
+ return plat_dma_supported(dev, mask);
+}
+
+EXPORT_SYMBOL(dma_supported);
+
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+EXPORT_SYMBOL(dma_is_consistent);
+
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ plat_extra_sync_for_device(dev);
+ __dma_sync((unsigned long)vaddr, size, direction);
+}
+
+EXPORT_SYMBOL(dma_cache_sync);
diff --git a/target/linux/realtek/files/arch/rlx/mm/extable.c b/target/linux/realtek/files/arch/rlx/mm/extable.c
new file mode 100644
index 000000000..297fb9f39
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/extable.c
@@ -0,0 +1,21 @@
+/*
+ * linux/arch/mips/mm/extable.c
+ */
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/branch.h>
+#include <asm/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(exception_epc(regs));
+ if (fixup) {
+ regs->cp0_epc = fixup->nextinsn;
+
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/fault.c b/target/linux/realtek/files/arch/rlx/mm/fault.c
new file mode 100644
index 000000000..e31bd95ab
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/fault.c
@@ -0,0 +1,248 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ */
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+#include <linux/module.h>
+
+#include <asm/branch.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/highmem.h> /* For VMALLOC_END */
+
+#ifdef CONFIG_PANIC_PRINTK
+#define printk panic_printk
+#endif
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
+ unsigned long address)
+{
+ struct vm_area_struct * vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ const int field = sizeof(unsigned long) * 2;
+ siginfo_t info;
+ int fault;
+
+#if 0
+ printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
+ current->comm, current->pid, field, address, write,
+ field, regs->cp0_epc);
+#endif
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
+ goto vmalloc_fault;
+#ifdef MODULE_START
+ if (unlikely(address >= MODULE_START && address < MODULE_END))
+ goto vmalloc_fault;
+#endif
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto bad_area_nosemaphore;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ if (write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, write);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ tsk->thread.cp0_badvaddr = address;
+ tsk->thread.error_code = write;
+#if 1
+ printk("do_page_fault() #2: sending SIGSEGV to %s for "
+ "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+ tsk->comm,
+ write ? "write access to" : "read access from",
+ field, address,
+ field, (unsigned long) regs->cp0_epc,
+ field, (unsigned long) regs->regs[31]);
+#endif
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *) address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs)) {
+ current->thread.cp0_baduaddr = address;
+ return;
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+
+ printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
+ "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
+ raw_smp_processor_id(), field, address, field, regs->cp0_epc,
+ field, regs->regs[31]);
+ die("Oops", regs);
+
+out_of_memory:
+ /*
+ * We ran out of memory, call the OOM killer, and return to userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+ up_read(&mm->mmap_sem);
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+ else
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+#if 1
+ printk("do_page_fault() #3: sending SIGBUS to %s for "
+ "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+ tsk->comm,
+ write ? "write access to" : "read access from",
+ field, address,
+ field, (unsigned long) regs->cp0_epc,
+ field, (unsigned long) regs->regs[31]);
+#endif
+ tsk->thread.cp0_badvaddr = address;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *) address;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ return;
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int offset = __pgd_offset(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ set_pgd(pgd, *pgd_k);
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/highmem.c b/target/linux/realtek/files/arch/rlx/mm/highmem.c
new file mode 100644
index 000000000..2b1309b25
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/highmem.c
@@ -0,0 +1,130 @@
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+unsigned long highstart_pfn, highend_pfn;
+
+void *__kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ return page_address(page);
+ addr = kmap_high(page);
+ flush_tlb_one((unsigned long)addr);
+
+ return addr;
+}
+EXPORT_SYMBOL(__kmap);
+
+void __kunmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+EXPORT_SYMBOL(__kunmap);
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However, while holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ */
+
+void *__kmap_atomic(struct page *page, enum km_type type)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+ set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+ local_flush_tlb_one((unsigned long)vaddr);
+
+ return (void*) vaddr;
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < FIXADDR_START) { // FIXME
+ pagefault_enable();
+ return;
+ }
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_one(vaddr);
+#endif
+
+ pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
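
For context, the calling pattern the two functions above are written for looks roughly like this (page, dst, off and n are placeholders; KM_USER0 is one of this kernel generation's km_type slots):

    /* Peek at a possibly-highmem page without sleeping. */
    void *vaddr = __kmap_atomic(page, KM_USER0);

    memcpy(dst, vaddr + off, n);
    __kunmap_atomic(vaddr, KM_USER0);
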
+
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ pagefault_disable();
+
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
+ flush_tlb_one(vaddr);
+
+ return (void*) vaddr;
+}
+
+struct page *__kmap_atomic_to_page(void *ptr)
+{
+ unsigned long idx, vaddr = (unsigned long)ptr;
+ pte_t *pte;
+
+ if (vaddr < FIXADDR_START)
+ return virt_to_page(ptr);
+
+ idx = virt_to_fix(vaddr);
+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+ return pte_page(*pte);
+}
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/imem-dmem.S b/target/linux/realtek/files/arch/rlx/mm/imem-dmem.S
new file mode 100644
index 000000000..c77cf531f
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/imem-dmem.S
@@ -0,0 +1,188 @@
+#include <asm/asmmacro.h>
+
+ .text
+ LEAF(_imem_dmem_init)
+ .set noreorder
+
+ #--- initialize and start COP3
+ mfc0 $8,$12
+ nop
+ nop
+ or $8,0x80000000
+ mtc0 $8,$12
+ nop
+ nop
+
+ #--- invalidate the IRAM with a 0->1 transition
+ mtc0 $0, $20 # CCTL
+ nop
+ nop
+ li $8,0x00000020 # IRAM Off
+ mtc0 $8, $20
+ nop
+ nop
+
+ #--- invalidate the icache and dcache with a 0->1 transition
+ mtc0 $0, $20 # CCTL
+ nop
+ nop
+ li $8,0x00000202 # Invalid ICACHE and DCACHE
+ mtc0 $8, $20
+ nop
+ nop
+
+ #--- load iram base and top
+#define IMEM0_SIZE 4096
+#define IMEM1_SIZE 4096
+ la $8,__iram
+ la $9,0x0fffc000
+ and $8,$8,$9
+ mtc3 $8,$0 # IW bas
+ nop
+ nop
+#ifdef CONFIG_ARCH_CPU_RLX5281
+#ifdef CONFIG_RTL8198_REVISION_B
+ //jasonwang0413
+ li t6,0xb8000000
+ lw t7,0(t6)
+ nop
+ nop
+ and t7,t7,0x03
+ bgtz t7,rev_b
+ nop
+rev_a: // 00
+ li t6,0xfff
+ li t4,0xfff
+ j rev_end
+ nop
+rev_b: // 01
+rev_c: // 02
+ li t6,0x7fff
+ li t4,0x1fff
+rev_end:
+ nop
+ nop
+
+ add $8,$8,t6
+#else
+ addiu $8,$8,IMEM0_SIZE-1
+#endif
+ mtc3 $8,$1 # IW top
+ nop
+ nop
+
+ #--- Refill the IRAM with a 0->1 transition
+ mtc0 $0, $20 # CCTL
+ nop
+ nop
+ li $8,0x00000010 # IRAM Fill
+ mtc0 $8, $20
+ nop
+ nop
+ #--- load iram base1 and top1
+ la $8,__iram
+#ifdef CONFIG_RTL8198_REVISION_B
+ add $8,$8,t6
+ add $8,$8,0x01
+#else
+ add $8,$8,IMEM0_SIZE
+#endif
+ la $9,0x0fffc000
+ and $8,$8,$9
+ mtc3 $8,$2 # IW bas 1
+ nop
+ nop
+#ifdef CONFIG_RTL8198_REVISION_B
+ add $8,$8,t4
+#else
+ addiu $8,$8,IMEM1_SIZE-1
+#endif
+ mtc3 $8,$3 # IW top 1
+ nop
+ nop
+
+ #--- Refill the IRAM with a 0->1 transition
+ mtc0 $0, $20,1 # CCTL
+ nop
+ nop
+ li $8,0x00000010 # IRAM Fill
+ mtc0 $8, $20,1
+ nop
+ nop
+
+ #--- load dram base and top
+ la $8,__dram_start
+ la $9,__dram_end
+ beq $8,$9,skip_dramInit
+ nop
+ la $9,0x0fffe000
+ and $8,$8,$9
+ mtc3 $8,$4 # DW bas
+ nop
+ nop
+ addiu $8,$8,0xfff
+ mtc3 $8,$5 # DW top
+ nop
+ nop
+ #la $8,__dram_start
+ #la $9,__dram_end
+ #beq $8,$9,skip_dramInit
+ #nop
+ #la $9,0x0fffe000
+ add $8,$8,1
+ #and $8,$8,$9
+ mtc3 $8,$6 # DW bas 1
+ nop
+ nop
+ addiu $8,$8,0xfff
+ mtc3 $8,$7 # DW top 1
+ nop
+ nop
+ li $8,0x00000400 # DMEM On // pkshih: add to enable DMEM0 and DMEM1
+ mtc0 $8, $20 # DMEM0 ON
+ mtc0 $8, $20,1 # DMEM1 ON
+ nop
+ nop
+
+
+#else
+ addiu $8,$8,0x3fff
+ mtc3 $8,$1 # IW top
+ nop
+ nop
+
+ #--- Refill the IRAM with a 0->1 transition
+ mtc0 $0, $20 # CCTL
+ nop
+ nop
+ li $8,0x00000010 # IRAM Fill
+ mtc0 $8, $20
+ nop
+ nop
+
+ #--- load dram base and top
+ la $8,__dram_start
+ la $9,__dram_end
+ beq $8,$9,skip_dramInit
+ nop
+ la $9,0x0fffe000
+ and $8,$8,$9
+ mtc3 $8,$4 # DW bas
+ nop
+ nop
+ addiu $8,$8,0x1fff
+ mtc3 $8,$5 # DW top
+ nop
+ nop
+#endif
+skip_dramInit:
+ #--- enable icache and dcache
+ mtc0 $0, $20 # CCTL
+ nop
+ nop
+
+ .set reorder
+ j $31
+ END(_imem_dmem_init)
+
+
diff --git a/target/linux/realtek/files/arch/rlx/mm/init.c b/target/linux/realtek/files/arch/rlx/mm/init.c
new file mode 100644
index 000000000..0d24ff1d6
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/init.c
@@ -0,0 +1,360 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/bootinfo.h>
+#include <asm/cachectl.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/kmap_types.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/fixmap.h>
+
+/* Atomicity and interruptability */
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/*
+ * We have up to 8 empty zeroed pages so we can map one of the right colour
+ * when needed. This is necessary only on R4000 / R4400 SC and MC versions
+ * where we have to avoid VCED / VECI exceptions for good performance at
+ * any price. Since page is never written to after the initialization we
+ * don't have to care about aliases on other CPUs.
+ */
+unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
+
+/*
+ * Not static inline because used by IP27 special magic initialization code
+ */
+unsigned long setup_zero_pages(void)
+{
+ unsigned int order;
+ unsigned long size;
+ struct page *page;
+
+ order = 0;
+
+ empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!empty_zero_page)
+ panic("Oh boy, that early out of memory?");
+
+ page = virt_to_page((void *)empty_zero_page);
+ split_page(page, order);
+ while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
+ SetPageReserved(page);
+ page++;
+ }
+
+ size = PAGE_SIZE << order;
+ zero_page_mask = (size - 1) & PAGE_MASK;
+
+ return 1UL << order;
+}
+
+static inline void kmap_coherent_init(void) {}
+
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr, flags, entrylo;
+ unsigned long old_ctx;
+ pte_t pte;
+ int tlbidx;
+
+ BUG_ON(Page_dcache_dirty(page));
+
+ inc_preempt_count();
+ idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+ vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+ pte = mk_pte(page, PAGE_KERNEL);
+ entrylo = pte_val(pte) >> 6;
+
+ ENTER_CRITICAL(flags);
+ old_ctx = read_c0_entryhi();
+ write_c0_entryhi(vaddr & (PAGE_MASK << 1));
+ write_c0_entrylo0(entrylo);
+ write_c0_entrylo1(entrylo);
+ tlbidx = read_c0_wired();
+ write_c0_wired(tlbidx + 1);
+ write_c0_index(tlbidx);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ EXIT_CRITICAL(flags);
+
+ return (void*) vaddr;
+}
+
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+void kunmap_coherent(void)
+{
+ unsigned int wired;
+ unsigned long flags, old_ctx;
+
+ ENTER_CRITICAL(flags);
+ old_ctx = read_c0_entryhi();
+ wired = read_c0_wired() - 1;
+ write_c0_wired(wired);
+ write_c0_index(wired);
+ write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ EXIT_CRITICAL(flags);
+
+ dec_preempt_count();
+ preempt_check_resched();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+
+ if ((vma->vm_flags & VM_EXEC))
+ flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+}
+
+void __init fixrange_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+{
+#if defined(CONFIG_HIGHMEM)
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i, j, k;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = __pgd_offset(vaddr);
+ j = __pud_offset(vaddr);
+ k = __pmd_offset(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ pud = (pud_t *)pgd;
+ for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+ pmd = (pmd_t *)pud;
+ for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pmd(pmd, __pmd((unsigned long)pte));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+ vaddr += PMD_SIZE;
+ }
+ k = 0;
+ }
+ j = 0;
+ }
+#endif
+}
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+static int __init page_is_ram(unsigned long pagenr)
+{
+ int i;
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ unsigned long addr, end;
+
+ if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+ /* not usable memory */
+ continue;
+
+ addr = PFN_UP(boot_mem_map.map[i].addr);
+ end = PFN_DOWN(boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size);
+
+ if (pagenr >= addr && pagenr < end)
+ return 1;
+ }
+
+ return 0;
+}
+
+void __init paging_init(void)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ unsigned long lastpfn;
+
+ pagetable_init();
+
+#ifdef CONFIG_HIGHMEM
+ kmap_init();
+#endif
+ kmap_coherent_init();
+
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ lastpfn = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+ lastpfn = highend_pfn;
+#endif
+
+ free_area_init_nodes(max_zone_pfns);
+}
+
+static struct kcore_list kcore_mem, kcore_vmalloc;
+
+void __init mem_init(void)
+{
+ unsigned long codesize, reservedpages, datasize, initsize;
+ unsigned long tmp, ram;
+
+#ifdef CONFIG_HIGHMEM
+ max_mapnr = highend_pfn;
+#else
+ max_mapnr = max_low_pfn;
+#endif
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ totalram_pages += free_all_bootmem();
+ totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
+
+ reservedpages = ram = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++)
+ if (page_is_ram(tmp)) {
+ ram++;
+ if (PageReserved(pfn_to_page(tmp)))
+ reservedpages++;
+ }
+ num_physpages = ram;
+
+#ifdef CONFIG_HIGHMEM
+ for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+ struct page *page = pfn_to_page(tmp);
+
+ if (!page_is_ram(tmp)) {
+ SetPageReserved(page);
+ continue;
+ }
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ totalhigh_pages++;
+ }
+ totalram_pages += totalhigh_pages;
+ num_physpages += totalhigh_pages;
+#endif
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ VMALLOC_END-VMALLOC_START);
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
+ "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ ram << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+}
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ void *addr = phys_to_virt(PFN_PHYS(pfn));
+
+ ClearPageReserved(page);
+ init_page_count(page);
+ memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+ __free_page(page);
+ totalram_pages++;
+ }
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_init_pages("initrd memory",
+ virt_to_phys((void *)start),
+ virt_to_phys((void *)end));
+}
+#endif
+
+void __init_refok free_initmem(void)
+{
+ bsp_free_prom_memory();
+ free_init_pages("unused kernel memory",
+ __pa_symbol(&__init_begin),
+ __pa_symbol(&__init_end));
+}
+
+unsigned long pgd_current[NR_CPUS];
+/*
+ * On 64-bit we've got three-level pagetables with a slightly
+ * different layout ...
+ */
+#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
+
+/*
+ * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
+ * are constants. So we use the variants from asm-offset.h until that gcc
+ * will officially be retired.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
diff --git a/target/linux/realtek/files/arch/rlx/mm/ioremap.c b/target/linux/realtek/files/arch/rlx/mm/ioremap.c
new file mode 100644
index 000000000..0c4324834
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/ioremap.c
@@ -0,0 +1,191 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2001, 2002 Ralf Baechle
+ */
+#include <linux/module.h>
+#include <asm/addrspace.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/tlbflush.h>
+
+static inline void remap_area_pte(pte_t * pte, unsigned long address,
+ phys_t size, phys_t phys_addr, unsigned long flags)
+{
+ phys_t end;
+ unsigned long pfn;
+ pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
+ | __WRITEABLE | flags);
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ BUG_ON(address >= end);
+ pfn = phys_addr >> PAGE_SHIFT;
+ do {
+ if (!pte_none(*pte)) {
+ printk("remap_area_pte: page already exists\n");
+ BUG();
+ }
+ set_pte(pte, pfn_pte(pfn, pgprot));
+ address += PAGE_SIZE;
+ pfn++;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
+ phys_t size, phys_t phys_addr, unsigned long flags)
+{
+ phys_t end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ phys_addr -= address;
+ BUG_ON(address >= end);
+ do {
+ pte_t * pte = pte_alloc_kernel(pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ remap_area_pte(pte, address, end - address, address + phys_addr, flags);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+static int remap_area_pages(unsigned long address, phys_t phys_addr,
+ phys_t size, unsigned long flags)
+{
+ int error;
+ pgd_t * dir;
+ unsigned long end = address + size;
+
+ phys_addr -= address;
+ dir = pgd_offset(&init_mm, address);
+ flush_cache_all();
+ BUG_ON(address >= end);
+ do {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ error = -ENOMEM;
+ pud = pud_alloc(&init_mm, dir, address);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, address);
+ if (!pmd)
+ break;
+ if (remap_area_pmd(pmd, address, end - address,
+ phys_addr + address, flags))
+ break;
+ error = 0;
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+ flush_tlb_all();
+ return error;
+}
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+
+#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
+
+void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
+{
+ struct vm_struct * area;
+ unsigned long offset;
+ phys_t last_addr;
+ void * addr;
+
+ phys_addr = fixup_bigphys_addr(phys_addr, size);
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ /*
+ * Map uncached objects in the low 512mb of address space using KSEG1,
+ * otherwise map using page tables.
+ */
+ if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
+ flags == _CACHE_UNCACHED)
+ return (void __iomem *) CKSEG1ADDR(phys_addr);
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (phys_addr < virt_to_phys(high_memory)) {
+ char *t_addr, *t_end;
+ struct page *page;
+
+ t_addr = __va(phys_addr);
+ t_end = t_addr + (size - 1);
+
+ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+ if(!PageReserved(page))
+ return NULL;
+ }
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = area->addr;
+ if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+ vunmap(addr);
+ return NULL;
+ }
+
+ return (void __iomem *) (offset + (char *)addr);
+}
+
+#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
+
+void __iounmap(const volatile void __iomem *addr)
+{
+ struct vm_struct *p;
+
+ if (IS_KSEG1(addr))
+ return;
+
+ p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+ if (!p)
+ printk(KERN_ERR "iounmap: bad address %p\n", addr);
+
+ kfree(p);
+}
+
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__iounmap);
diff --git a/target/linux/realtek/files/arch/rlx/mm/page-rlx.c b/target/linux/realtek/files/arch/rlx/mm/page-rlx.c
new file mode 100644
index 000000000..c93a74a7c
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/page-rlx.c
@@ -0,0 +1,299 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+
+#include <asm/cacheops.h>
+#include <asm/inst.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/bootinfo.h>
+#include <asm/rlxregs.h>
+#include <asm/mmu_context.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x58 bytes
+ * R4600 v1.7: 0x5c bytes
+ * R4600 v2.0: 0x60 bytes
+ * With prefetching, 16 byte strides 0xa0 bytes
+ */
+
+static unsigned int clear_page_array[0x130 / 4];
+void clear_page(void * page) __attribute__((alias("clear_page_array")));
+EXPORT_SYMBOL(clear_page);
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x11c bytes
+ * R4600 v1.7: 0x080 bytes
+ * R4600 v2.0: 0x07c bytes
+ * With prefetching, 16 byte strides 0x0b8 bytes
+ */
+static unsigned int copy_page_array[0x148 / 4];
+void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
+EXPORT_SYMBOL(copy_page);
+
+static int load_offset __cpuinitdata;
+static int store_offset __cpuinitdata;
+
+static unsigned int __cpuinitdata *dest, *epc;
+
+static unsigned int instruction_pending;
+static union mips_instruction delayed_mi;
+
+static void __cpuinit emit_instruction(union mips_instruction mi)
+{
+ if (instruction_pending)
+ *epc++ = delayed_mi.word;
+
+ instruction_pending = 1;
+ delayed_mi = mi;
+}
+
+static inline void flush_delay_slot_or_nop(void)
+{
+ if (instruction_pending) {
+ *epc++ = delayed_mi.word;
+ instruction_pending = 0;
+ return;
+ }
+
+ *epc++ = 0;
+}
+
+static inline unsigned int *label(void)
+{
+ if (instruction_pending) {
+ *epc++ = delayed_mi.word;
+ instruction_pending = 0;
+ }
+
+ return epc;
+}
+
+static inline void build_insn_word(unsigned int word)
+{
+ union mips_instruction mi;
+
+ mi.word = word;
+
+ emit_instruction(mi);
+}
+
+static inline void build_nop(void)
+{
+ build_insn_word(0); /* nop */
+}
+
+static inline void build_load_reg(int reg)
+{
+ union mips_instruction mi;
+ unsigned int width;
+
+ mi.i_format.opcode = lw_op;
+ width = 4;
+ mi.i_format.rs = 5; /* $a1 */
+ mi.i_format.rt = reg; /* $reg */
+ mi.i_format.simmediate = load_offset;
+
+ load_offset += width;
+ emit_instruction(mi);
+}
+
+static void __cpuinit build_store_reg(int reg)
+{
+ union mips_instruction mi;
+ unsigned int width;
+
+ mi.i_format.opcode = sw_op;
+ width = 4;
+ mi.i_format.rs = 4; /* $a0 */
+ mi.i_format.rt = reg; /* $reg */
+ mi.i_format.simmediate = store_offset;
+
+ store_offset += width;
+ emit_instruction(mi);
+}
+
+static inline void build_addiu_a2_a0(unsigned long offset)
+{
+ union mips_instruction mi;
+
+ BUG_ON(offset > 0x7fff);
+
+ mi.i_format.opcode = addiu_op;
+ mi.i_format.rs = 4; /* $a0 */
+ mi.i_format.rt = 6; /* $a2 */
+ mi.i_format.simmediate = offset;
+
+ emit_instruction(mi);
+}
+
+static inline void build_addiu_a2(unsigned long offset)
+{
+ union mips_instruction mi;
+
+ BUG_ON(offset > 0x7fff);
+
+ mi.i_format.opcode = addiu_op;
+ mi.i_format.rs = 6; /* $a2 */
+ mi.i_format.rt = 6; /* $a2 */
+ mi.i_format.simmediate = offset;
+
+ emit_instruction(mi);
+}
+
+static inline void build_addiu_a1(unsigned long offset)
+{
+ union mips_instruction mi;
+
+ BUG_ON(offset > 0x7fff);
+
+ mi.i_format.opcode = addiu_op;
+ mi.i_format.rs = 5; /* $a1 */
+ mi.i_format.rt = 5; /* $a1 */
+ mi.i_format.simmediate = offset;
+
+ load_offset -= offset;
+
+ emit_instruction(mi);
+}
+
+static inline void build_addiu_a0(unsigned long offset)
+{
+ union mips_instruction mi;
+
+ BUG_ON(offset > 0x7fff);
+
+ mi.i_format.opcode = addiu_op;
+ mi.i_format.rs = 4; /* $a0 */
+ mi.i_format.rt = 4; /* $a0 */
+ mi.i_format.simmediate = offset;
+
+ store_offset -= offset;
+
+ emit_instruction(mi);
+}
+
+static inline void build_bne(unsigned int *dest)
+{
+ union mips_instruction mi;
+
+ mi.i_format.opcode = bne_op;
+ mi.i_format.rs = 6; /* $a2 */
+ mi.i_format.rt = 4; /* $a0 */
+ mi.i_format.simmediate = dest - epc - 1;
+
+ *epc++ = mi.word;
+ flush_delay_slot_or_nop();
+}
+
+static inline void build_jr_ra(void)
+{
+ union mips_instruction mi;
+
+ mi.r_format.opcode = spec_op;
+ mi.r_format.rs = 31;
+ mi.r_format.rt = 0;
+ mi.r_format.rd = 0;
+ mi.r_format.re = 0;
+ mi.r_format.func = jr_op;
+
+ *epc++ = mi.word;
+ flush_delay_slot_or_nop();
+}
+
+void __cpuinit build_clear_page(void)
+{
+ unsigned int loop_start;
+ unsigned long off;
+
+ epc = (unsigned int *) &clear_page_array;
+ instruction_pending = 0;
+ store_offset = 0;
+
+ off = PAGE_SIZE;
+ build_addiu_a2_a0(off);
+
+ dest = label();
+ //do {
+ build_store_reg(0);
+ build_store_reg(0);
+ build_store_reg(0);
+ build_store_reg(0);
+ //} while (store_offset < half_scache_line_size());
+
+ build_addiu_a0(2 * store_offset);
+ loop_start = store_offset;
+ //do {
+ build_store_reg(0);
+ build_store_reg(0);
+ build_store_reg(0);
+ build_store_reg(0);
+ //} while ((store_offset - loop_start) < half_scache_line_size());
+ build_bne(dest);
+
+ build_jr_ra();
+
+ BUG_ON(epc > clear_page_array + ARRAY_SIZE(clear_page_array));
+}
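+
+/*
+ * An illustrative expansion (a sketch only, not emitted verbatim): the
+ * generated clear_page roughly corresponds to
+ *
+ *     addiu   a2, a0, PAGE_SIZE
+ * 1:  sw      zero,   0(a0)
+ *     sw      zero,   4(a0)
+ *     sw      zero,   8(a0)
+ *     sw      zero,  12(a0)
+ *     addiu   a0, a0, 32
+ *     sw      zero, -16(a0)
+ *     sw      zero, -12(a0)
+ *     sw      zero,  -8(a0)
+ *     bne     a2, a0, 1b
+ *      sw     zero,  -4(a0)          # last store rides the delay slot
+ *     jr      ra
+ *      nop
+ */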
+
+void __cpuinit build_copy_page(void)
+{
+ unsigned int loop_start;
+
+ epc = (unsigned int *) &copy_page_array;
+ store_offset = load_offset = 0;
+ instruction_pending = 0;
+
+ build_addiu_a2_a0(PAGE_SIZE);
+
+ dest = label();
+ loop_start = store_offset;
+ //do {
+ build_load_reg( 8);
+ build_load_reg( 9);
+ build_load_reg(10);
+ build_load_reg(11);
+ build_store_reg( 8);
+ build_store_reg( 9);
+ build_store_reg(10);
+ build_store_reg(11);
+ //} while ((store_offset - loop_start) < half_scache_line_size());
+
+ build_addiu_a0(2 * store_offset);
+ build_addiu_a1(2 * load_offset);
+ loop_start = store_offset;
+ //do {
+ build_load_reg( 8);
+ build_load_reg( 9);
+ build_load_reg(10);
+ build_load_reg(11);
+ build_store_reg( 8);
+ build_store_reg( 9);
+ build_store_reg(10);
+ build_store_reg(11);
+ //} while ((store_offset - loop_start) < half_scache_line_size());
+ build_bne(dest);
+
+ build_jr_ra();
+
+ BUG_ON(epc > copy_page_array + ARRAY_SIZE(copy_page_array));
+}
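+
+/*
+ * build_copy_page() follows the same pattern: each iteration loads four
+ * words from the source page (a1) into t0-t3 and stores them to the
+ * destination page (a0), and the final store again fills the bne delay
+ * slot.
+ */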
diff --git a/target/linux/realtek/files/arch/rlx/mm/pgtable-32.c b/target/linux/realtek/files/arch/rlx/mm/pgtable-32.c
new file mode 100644
index 000000000..575e40192
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/pgtable-32.c
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+void pgd_init(unsigned long page)
+{
+ unsigned long *p = (unsigned long *) page;
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i+=8) {
+ p[i + 0] = (unsigned long) invalid_pte_table;
+ p[i + 1] = (unsigned long) invalid_pte_table;
+ p[i + 2] = (unsigned long) invalid_pte_table;
+ p[i + 3] = (unsigned long) invalid_pte_table;
+ p[i + 4] = (unsigned long) invalid_pte_table;
+ p[i + 5] = (unsigned long) invalid_pte_table;
+ p[i + 6] = (unsigned long) invalid_pte_table;
+ p[i + 7] = (unsigned long) invalid_pte_table;
+ }
+}
+
+void __init pagetable_init(void)
+{
+ unsigned long vaddr;
+ pgd_t *pgd_base;
+#ifdef CONFIG_HIGHMEM
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+#endif
+
+ /* Initialize the entire pgd. */
+ pgd_init((unsigned long)swapper_pg_dir);
+ pgd_init((unsigned long)swapper_pg_dir
+ + sizeof(pgd_t) * USER_PTRS_PER_PGD);
+
+ pgd_base = swapper_pg_dir;
+
+ /*
+ * Fixed mappings:
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, 0, pgd_base);
+
+#ifdef CONFIG_HIGHMEM
+ /*
+ * Permanent kmaps:
+ */
+ vaddr = PKMAP_BASE;
+ fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+
+ pgd = swapper_pg_dir + __pgd_offset(vaddr);
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ pte = pte_offset_kernel(pmd, vaddr);
+ pkmap_page_table = pte;
+#endif
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/tlb-rlx.c b/target/linux/realtek/files/arch/rlx/mm/tlb-rlx.c
new file mode 100644
index 000000000..69238fc9b
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/tlb-rlx.c
@@ -0,0 +1,261 @@
+/*
+ * tlb-rlx.c: RLX specific TLB/MMU code.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ *
+ * with a lot of changes to make this thing work for R3000s
+ * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
+ * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
+ * Copyright (C) 2002 Ralf Baechle
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+#undef DEBUG_TLB
+
+extern void build_tlb_refill_handler(void);
+
+/* CP0 hazard avoidance. */
+#define BARRIER \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "nop\n\t" \
+ ".set pop\n\t")
+
+/* TLB operations. */
+void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry;
+
+#ifdef DEBUG_TLB
+ printk("[tlball]");
+#endif
+
+ local_irq_save(flags);
+ old_ctx = read_c0_entryhi() & ASID_MASK;
+ write_c0_entrylo0(0);
+ entry = read_c0_wired();
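+ /*
+  * Point every non-wired entry at a distinct, unmapped KSEG0 address so
+  * that no two entries can ever alias; the index register takes the
+  * entry number in bits 13:8, hence the << 8 below.
+  */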
+ for (; entry < current_cpu_data.tlbsize; entry++) {
+ write_c0_index(entry << 8);
+ write_c0_entryhi((entry | 0x80000) << 12);
+ BARRIER;
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(old_ctx);
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+#ifdef DEBUG_TLB
+ printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
+#endif
+ drop_mmu_context(mm, cpu);
+ }
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+ unsigned long size, flags;
+
+#ifdef DEBUG_TLB
+ printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
+ cpu_context(cpu, mm) & ASID_MASK, start, end);
+#endif
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size <= current_cpu_data.tlbsize) {
+ int oldpid = read_c0_entryhi() & ASID_MASK;
+ int newpid = cpu_context(cpu, mm) & ASID_MASK;
+
+ start &= PAGE_MASK;
+ end += PAGE_SIZE - 1;
+ end &= PAGE_MASK;
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start | newpid);
+ start += PAGE_SIZE; /* BARRIER */
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ continue;
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(oldpid);
+ } else {
+ drop_mmu_context(mm, cpu);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, flags;
+
+#ifdef DEBUG_TLB
+ printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end);
+#endif
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size <= current_cpu_data.tlbsize) {
+ int pid = read_c0_entryhi();
+
+ start &= PAGE_MASK;
+ end += PAGE_SIZE - 1;
+ end &= PAGE_MASK;
+
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start);
+ start += PAGE_SIZE; /* BARRIER */
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ continue;
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(pid);
+ } else {
+ local_flush_tlb_all();
+ }
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+
+ if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
+ unsigned long flags;
+ int oldpid, newpid, idx;
+
+#ifdef DEBUG_TLB
+ printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
+#endif
+ newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+ page &= PAGE_MASK;
+ local_irq_save(flags);
+ oldpid = read_c0_entryhi() & ASID_MASK;
+ write_c0_entryhi(page | newpid);
+ BARRIER;
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ goto finish;
+ tlb_write_indexed();
+
+finish:
+ write_c0_entryhi(oldpid);
+ local_irq_restore(flags);
+ }
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ int idx, pid;
+
+ /*
+ * Handle the debugger faulting in pages for the debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ pid = read_c0_entryhi() & ASID_MASK;
+
+#ifdef DEBUG_TLB
+ if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK))
+     || (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
+  printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
+         cpu_context(smp_processor_id(), vma->vm_mm), pid);
+ }
+#endif
+
+ local_irq_save(flags);
+ address &= PAGE_MASK;
+ write_c0_entryhi(address | pid);
+ BARRIER;
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(pte_val(pte));
+ write_c0_entryhi(address | pid);
+ if (idx < 0) { /* BARRIER */
+ tlb_write_random();
+ } else {
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(pid);
+ local_irq_restore(flags);
+}
+
+void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+
+ unsigned long old_pagemask;
+ unsigned long w;
+
+#ifdef DEBUG_TLB
+ printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n",
+ entrylo0, entryhi, pagemask);
+#endif
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_pagemask = read_c0_pagemask();
+ w = read_c0_wired();
+ write_c0_wired(w + 1);
+ write_c0_index(w << 8);
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ BARRIER;
+ tlb_write_indexed();
+
+ write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+ local_flush_tlb_all();
+ local_irq_restore(flags);
+}
+
+void __cpuinit tlb_init(void)
+{
+ local_flush_tlb_all();
+
+ build_tlb_refill_handler();
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/tlbex-fault.S b/target/linux/realtek/files/arch/rlx/mm/tlbex-fault.S
new file mode 100644
index 000000000..f41e65a61
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/tlbex-fault.S
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/rlxregs.h>
+#include <asm/page.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .macro tlb_do_page_fault, write
+ NESTED(tlb_do_page_fault_\write, PT_SIZE, sp)
+ SAVE_ALL
+ MFC0 a2, CP0_BADVADDR
+ KMODE
+ move a0, sp
+ REG_S a2, PT_BVADDR(sp)
+ li a1, \write
+ PTR_LA ra, ret_from_exception
+ j do_page_fault
+ END(tlb_do_page_fault_\write)
+ .endm
+
+ tlb_do_page_fault 0
+ tlb_do_page_fault 1
diff --git a/target/linux/realtek/files/arch/rlx/mm/tlbex.c b/target/linux/realtek/files/arch/rlx/mm/tlbex.c
new file mode 100644
index 000000000..51ab74043
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/tlbex.c
@@ -0,0 +1,449 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Synthesize TLB refill handlers at runtime.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * ... and the days got worse and worse and now you see
+ * I've gone completely out of my mind.
+ *
+ * They're coming to take me away haha
+ * they're coming to take me away hoho hihi haha
+ * to the funny farm where code is beautiful all the time ...
+ *
+ * (Condolences to Napoleon XIV)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/mmu_context.h>
+
+#include "uasm.h"
+
+/* Handle labels (which must be positive integers). */
+enum label_id
+{
+ label_second_part = 1,
+ label_leave,
+#ifdef MODULE_START
+ label_module_alloc,
+#endif
+ label_vmalloc,
+ label_vmalloc_done,
+ label_tlbw_hazard,
+ label_split,
+ label_nopage_tlbl,
+ label_nopage_tlbs,
+ label_nopage_tlbm,
+ label_rlx_write_probe_fail,
+};
+
+UASM_L_LA (_second_part)
+UASM_L_LA (_leave)
+#ifdef MODULE_START
+UASM_L_LA (_module_alloc)
+#endif
+UASM_L_LA (_vmalloc)
+UASM_L_LA (_vmalloc_done)
+UASM_L_LA (_tlbw_hazard)
+UASM_L_LA (_split)
+UASM_L_LA (_nopage_tlbl)
+UASM_L_LA (_nopage_tlbs)
+UASM_L_LA (_nopage_tlbm)
+UASM_L_LA (_rlx_write_probe_fail)
+
+/*
+ * For debug purposes.
+ */
+static inline void
+dump_handler (const u32 * handler, int count)
+{
+ int i;
+
+ pr_debug ("\t.set push\n");
+ pr_debug ("\t.set noreorder\n");
+
+ for (i = 0; i < count; i++)
+ pr_debug ("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);
+
+ pr_debug ("\t.set pop\n");
+}
+
+/* The only general purpose registers allowed in TLB handlers. */
+#define K0 26
+#define K1 27
+
+/* Some CP0 registers */
+#define C0_INDEX 0, 0
+#define C0_ENTRYLO0 2, 0
+#define C0_TCBIND 2, 2
+#define C0_ENTRYLO1 3, 0
+#define C0_CONTEXT 4, 0
+#define C0_BADVADDR 8, 0
+#define C0_ENTRYHI 10, 0
+#define C0_EPC 14, 0
+#define C0_XCONTEXT 20, 0
+
+#define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
+
+/* The worst case length of the handler is around 18 instructions for
+ * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
+ * Maximum space available is 32 instructions for R3000 and 64
+ * instructions for R4000.
+ *
+ * We deliberately chose a buffer size of 128, so we won't scribble
+ * over anything important on overflow before we panic.
+ */
+static u32 tlb_handler[128] __cpuinitdata;
+
+/* simply assume worst case size for labels and relocs */
+static struct uasm_label labels[128] __cpuinitdata;
+static struct uasm_reloc relocs[128] __cpuinitdata;
+
+#define RLX_TRAP_TLB_BASE 0x80000000
+#define RLX_TRAP_TLB_SIZE 0x80
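+
+/*
+ * The synthesized refill handler is copied directly to the TLB refill
+ * exception vector at RLX_TRAP_TLB_BASE (in KSEG0) and therefore has to
+ * fit in RLX_TRAP_TLB_SIZE bytes, i.e. 32 instructions.
+ */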
+
+/*
+ * The R3000 TLB handler is simple.
+ */
+static void __cpuinit
+build_rlx_tlb_refill_handler (void)
+{
+ long pgdc = (long) pgd_current;
+ u32 *p;
+
+ memset (tlb_handler, 0, sizeof (tlb_handler));
+ p = tlb_handler;
+
+ uasm_i_mfc0 (&p, K0, C0_BADVADDR);
+ uasm_i_lui (&p, K1, uasm_rel_hi (pgdc)); /* cp0 delay */
+ uasm_i_lw (&p, K1, uasm_rel_lo (pgdc), K1);
+ uasm_i_srl (&p, K0, K0, 22); /* load delay */
+ uasm_i_sll (&p, K0, K0, 2);
+ uasm_i_addu (&p, K1, K1, K0);
+ uasm_i_mfc0 (&p, K0, C0_CONTEXT);
+ uasm_i_lw (&p, K1, 0, K1); /* cp0 delay */
+ uasm_i_andi (&p, K0, K0, 0xffc); /* load delay */
+ uasm_i_addu (&p, K1, K1, K0);
+ uasm_i_lw (&p, K0, 0, K1);
+ uasm_i_nop (&p); /* load delay */
+ uasm_i_mtc0 (&p, K0, C0_ENTRYLO0);
+ uasm_i_mfc0 (&p, K1, C0_EPC); /* cp0 delay */
+ uasm_i_tlbwr (&p); /* cp0 delay */
+ uasm_i_jr (&p, K1);
+ uasm_i_rfe (&p); /* branch delay */
+
+ if (p > tlb_handler + 32)
+ panic ("TLB refill handler space exceeded");
+
+ pr_debug ("Wrote TLB refill handler (%u instructions).\n",
+ (unsigned int) (p - tlb_handler));
+
+ memcpy ((void *) RLX_TRAP_TLB_BASE, tlb_handler, RLX_TRAP_TLB_SIZE);
+
+ dump_handler ((u32 *) RLX_TRAP_TLB_BASE, 32);
+}
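+
+/*
+ * For reference, the handler synthesized above corresponds roughly to
+ * the following sequence (k0/k1 are the only GPRs it may clobber):
+ *
+ *     mfc0    k0, c0_badvaddr
+ *     lui     k1, %hi(pgd_current)   # cp0 delay
+ *     lw      k1, %lo(pgd_current)(k1)
+ *     srl     k0, k0, 22             # load delay: pgd index
+ *     sll     k0, k0, 2
+ *     addu    k1, k1, k0
+ *     mfc0    k0, c0_context
+ *     lw      k1, 0(k1)              # cp0 delay: pgd entry = pte page
+ *     andi    k0, k0, 0xffc          # load delay: pte offset from Context
+ *     addu    k1, k1, k0
+ *     lw      k0, 0(k1)              # fetch the pte
+ *     nop                            # load delay
+ *     mtc0    k0, c0_entrylo0
+ *     mfc0    k1, c0_epc             # cp0 delay
+ *     tlbwr                          # cp0 delay
+ *     jr      k1
+ *     rfe                            # branch delay
+ */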
+
+#if 0
+static void __cpuinit
+build_adjust_context (u32 ** p, unsigned int ctx)
+{
+ unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
+ unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
+
+ if (shift)
+ UASM_i_SRL (p, ctx, ctx, shift);
+
+ uasm_i_andi (p, ctx, ctx, mask);
+}
+#endif
+
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0 (void);
+extern void tlb_do_page_fault_1 (void);
+
+/*
+ * 128 instructions for the fastpath handler is generous and should
+ * never be exceeded.
+ */
+#define FASTPATH_SIZE 128
+
+u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
+u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
+u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
+
+static void __cpuinit
+iPTE_LW (u32 ** p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
+{
+ UASM_i_LW (p, pte, 0, ptr);
+}
+
+static void __cpuinit
+iPTE_SW (u32 ** p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
+ unsigned int mode)
+{
+ uasm_i_ori (p, pte, pte, mode);
+ UASM_i_SW (p, pte, 0, ptr);
+}
+
+/*
+ * Check if PTE is present, if not then jump to LABEL. PTR points to
+ * the page table where this PTE is located, PTE will be re-loaded
+ * with its original value.
+ */
+static void __cpuinit
+build_pte_present (u32 ** p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
+ uasm_i_andi (p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+ uasm_i_xori (p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+ uasm_il_bnez (p, r, pte, lid);
+ iPTE_LW (p, l, pte, ptr);
+}
+
+/* Make PTE valid, store result in PTR. */
+static void __cpuinit
+build_make_valid (u32 ** p, struct uasm_reloc **r, unsigned int pte,
+ unsigned int ptr)
+{
+ unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
+
+ iPTE_SW (p, r, pte, ptr, mode);
+}
+
+/*
+ * Check if PTE can be written to, if not branch to LABEL. Regardless
+ * restore PTE with value from PTR when done.
+ */
+static void __cpuinit
+build_pte_writable (u32 ** p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
+ uasm_i_andi (p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_i_xori (p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_il_bnez (p, r, pte, lid);
+ iPTE_LW (p, l, pte, ptr);
+}
+
+/* Make PTE writable, update software status bits as well, then store
+ * at PTR.
+ */
+static void __cpuinit
+build_make_write (u32 ** p, struct uasm_reloc **r, unsigned int pte,
+ unsigned int ptr)
+{
+ unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
+ | _PAGE_DIRTY);
+
+ iPTE_SW (p, r, pte, ptr, mode);
+}
+
+/*
+ * Check if PTE can be modified, if not branch to LABEL. Regardless
+ * restore PTE with value from PTR when done.
+ */
+static void __cpuinit
+build_pte_modifiable (u32 ** p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
+ uasm_i_andi (p, pte, pte, _PAGE_WRITE);
+ uasm_il_beqz (p, r, pte, lid);
+ iPTE_LW (p, l, pte, ptr);
+}
+
+/*
+ * R3000 style TLB load/store/modify handlers.
+ */
+
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi.
+ * Then it returns.
+ */
+static void __cpuinit
+build_rlx_pte_reload_tlbwi (u32 ** p, unsigned int pte, unsigned int tmp)
+{
+ uasm_i_mtc0 (p, pte, C0_ENTRYLO0); /* cp0 delay */
+ uasm_i_mfc0 (p, tmp, C0_EPC); /* cp0 delay */
+ uasm_i_tlbwi (p);
+ uasm_i_jr (p, tmp);
+ uasm_i_rfe (p); /* branch delay */
+}
+
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi
+ * or tlbwr as appropriate. This is because the index register
+ * may have the probe fail bit set as a result of a trap on a
+ * kseg2 access, i.e. without refill. Then it returns.
+ */
+static void __cpuinit
+build_rlx_tlb_reload_write (u32 ** p, struct uasm_label **l,
+ struct uasm_reloc **r, unsigned int pte,
+ unsigned int tmp)
+{
+ uasm_i_mfc0 (p, tmp, C0_INDEX);
+ uasm_i_mtc0 (p, pte, C0_ENTRYLO0); /* cp0 delay */
+ uasm_il_bltz (p, r, tmp, label_rlx_write_probe_fail); /* cp0 delay */
+ uasm_i_mfc0 (p, tmp, C0_EPC); /* branch delay */
+ uasm_i_tlbwi (p); /* cp0 delay */
+ uasm_i_jr (p, tmp);
+ uasm_i_rfe (p); /* branch delay */
+ uasm_l_rlx_write_probe_fail (l, *p);
+ uasm_i_tlbwr (p); /* cp0 delay */
+ uasm_i_jr (p, tmp);
+ uasm_i_rfe (p); /* branch delay */
+}
+
+static void __cpuinit
+build_rlx_tlbchange_handler_head (u32 ** p, unsigned int pte,
+ unsigned int ptr)
+{
+ long pgdc = (long) pgd_current;
+
+ uasm_i_mfc0 (p, pte, C0_BADVADDR);
+ uasm_i_lui (p, ptr, uasm_rel_hi (pgdc)); /* cp0 delay */
+ uasm_i_lw (p, ptr, uasm_rel_lo (pgdc), ptr);
+ uasm_i_srl (p, pte, pte, 22); /* load delay */
+ uasm_i_sll (p, pte, pte, 2);
+ uasm_i_addu (p, ptr, ptr, pte);
+ uasm_i_mfc0 (p, pte, C0_CONTEXT);
+ uasm_i_lw (p, ptr, 0, ptr); /* cp0 delay */
+ uasm_i_andi (p, pte, pte, 0xffc); /* load delay */
+ uasm_i_addu (p, ptr, ptr, pte);
+ uasm_i_lw (p, pte, 0, ptr);
+ uasm_i_tlbp (p); /* load delay */
+}
+
+static void __cpuinit
+build_rlx_tlb_load_handler (void)
+{
+ u32 *p = handle_tlbl;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset (handle_tlbl, 0, sizeof (handle_tlbl));
+ memset (labels, 0, sizeof (labels));
+ memset (relocs, 0, sizeof (relocs));
+
+ build_rlx_tlbchange_handler_head (&p, K0, K1);
+ build_pte_present (&p, &l, &r, K0, K1, label_nopage_tlbl);
+ uasm_i_nop (&p); /* load delay */
+ build_make_valid (&p, &r, K0, K1);
+ build_rlx_tlb_reload_write (&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbl (&l, p);
+ uasm_i_j (&p, (unsigned long) tlb_do_page_fault_0 & 0x0fffffff);
+ uasm_i_nop (&p);
+
+ if ((p - handle_tlbl) > FASTPATH_SIZE)
+ panic ("TLB load handler fastpath space exceeded");
+
+ uasm_resolve_relocs (relocs, labels);
+ pr_debug ("Wrote TLB load handler fastpath (%u instructions).\n",
+ (unsigned int) (p - handle_tlbl));
+
+ dump_handler (handle_tlbl, ARRAY_SIZE (handle_tlbl));
+}
+
+static void __cpuinit
+build_rlx_tlb_store_handler (void)
+{
+ u32 *p = handle_tlbs;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset (handle_tlbs, 0, sizeof (handle_tlbs));
+ memset (labels, 0, sizeof (labels));
+ memset (relocs, 0, sizeof (relocs));
+
+ build_rlx_tlbchange_handler_head (&p, K0, K1);
+ build_pte_writable (&p, &l, &r, K0, K1, label_nopage_tlbs);
+ uasm_i_nop (&p); /* load delay */
+ build_make_write (&p, &r, K0, K1);
+ build_rlx_tlb_reload_write (&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbs (&l, p);
+ uasm_i_j (&p, (unsigned long) tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop (&p);
+
+ if ((p - handle_tlbs) > FASTPATH_SIZE)
+ panic ("TLB store handler fastpath space exceeded");
+
+ uasm_resolve_relocs (relocs, labels);
+ pr_debug ("Wrote TLB store handler fastpath (%u instructions).\n",
+ (unsigned int) (p - handle_tlbs));
+
+ dump_handler (handle_tlbs, ARRAY_SIZE (handle_tlbs));
+}
+
+static void __cpuinit
+build_rlx_tlb_modify_handler (void)
+{
+ u32 *p = handle_tlbm;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset (handle_tlbm, 0, sizeof (handle_tlbm));
+ memset (labels, 0, sizeof (labels));
+ memset (relocs, 0, sizeof (relocs));
+
+ build_rlx_tlbchange_handler_head (&p, K0, K1);
+ build_pte_modifiable (&p, &l, &r, K0, K1, label_nopage_tlbm);
+ uasm_i_nop (&p); /* load delay */
+ build_make_write (&p, &r, K0, K1);
+ build_rlx_pte_reload_tlbwi (&p, K0, K1);
+
+ uasm_l_nopage_tlbm (&l, p);
+ uasm_i_j (&p, (unsigned long) tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop (&p);
+
+ if ((p - handle_tlbm) > FASTPATH_SIZE)
+ panic ("TLB modify handler fastpath space exceeded");
+
+ uasm_resolve_relocs (relocs, labels);
+ pr_debug ("Wrote TLB modify handler fastpath (%u instructions).\n",
+ (unsigned int) (p - handle_tlbm));
+
+ dump_handler (handle_tlbm, ARRAY_SIZE (handle_tlbm));
+}
+
+void __cpuinit
+build_tlb_refill_handler (void)
+{
+ /*
+ * The refill handler is generated per-CPU, multi-node systems
+ * may have local storage for it. The other handlers are only
+ * needed once.
+ */
+ static int run_once = 0;
+
+ build_rlx_tlb_refill_handler ();
+ if (!run_once)
+ {
+ build_rlx_tlb_load_handler ();
+ build_rlx_tlb_store_handler ();
+ build_rlx_tlb_modify_handler ();
+ run_once++;
+ }
+}
+
+void __cpuinit
+flush_tlb_handlers (void)
+{
+ local_flush_icache_range ((unsigned long) handle_tlbl,
+ (unsigned long) handle_tlbl + sizeof (handle_tlbl));
+ local_flush_icache_range ((unsigned long) handle_tlbs,
+ (unsigned long) handle_tlbs + sizeof (handle_tlbs));
+ local_flush_icache_range ((unsigned long) handle_tlbm,
+ (unsigned long) handle_tlbm + sizeof (handle_tlbm));
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/uasm.c b/target/linux/realtek/files/arch/rlx/mm/uasm.c
new file mode 100644
index 000000000..dee045884
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/uasm.c
@@ -0,0 +1,545 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+
+#include "uasm.h"
+
+enum fields {
+ RS = 0x001,
+ RT = 0x002,
+ RD = 0x004,
+ RE = 0x008,
+ SIMM = 0x010,
+ UIMM = 0x020,
+ BIMM = 0x040,
+ JIMM = 0x080,
+ FUNC = 0x100,
+ SET = 0x200
+};
+
+#define OP_MASK 0x3f
+#define OP_SH 26
+#define RS_MASK 0x1f
+#define RS_SH 21
+#define RT_MASK 0x1f
+#define RT_SH 16
+#define RD_MASK 0x1f
+#define RD_SH 11
+#define RE_MASK 0x1f
+#define RE_SH 6
+#define IMM_MASK 0xffff
+#define IMM_SH 0
+#define JIMM_MASK 0x3ffffff
+#define JIMM_SH 0
+#define FUNC_MASK 0x3f
+#define FUNC_SH 0
+#define SET_MASK 0x7
+#define SET_SH 0
+
+enum opcode {
+ insn_invalid,
+ insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
+ insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
+ insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
+ insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
+ insn_dsrl32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr,
+ insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
+ insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
+ insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
+ insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori
+};
+
+struct insn {
+ enum opcode opcode;
+ u32 match;
+ enum fields fields;
+};
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f) \
+ ((a) << OP_SH \
+ | (b) << RS_SH \
+ | (c) << RT_SH \
+ | (d) << RD_SH \
+ | (e) << RE_SH \
+ | (f) << FUNC_SH)
+
+static struct insn insn_table[] __cpuinitdata = {
+ { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
+ { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+ { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+ { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
+ { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+ { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+ { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+ { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+ { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+ { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
+ { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
+ { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
+ { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
+ { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
+ { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
+ { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
+ { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
+ { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
+ { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
+ { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
+ { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
+ { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
+ { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
+ { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
+ { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __cpuinit u32 build_rs(u32 arg)
+{
+ if (arg & ~RS_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RS_MASK) << RS_SH;
+}
+
+static inline __cpuinit u32 build_rt(u32 arg)
+{
+ if (arg & ~RT_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RT_MASK) << RT_SH;
+}
+
+static inline __cpuinit u32 build_rd(u32 arg)
+{
+ if (arg & ~RD_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RD_MASK) << RD_SH;
+}
+
+static inline __cpuinit u32 build_re(u32 arg)
+{
+ if (arg & ~RE_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RE_MASK) << RE_SH;
+}
+
+static inline __cpuinit u32 build_simm(s32 arg)
+{
+ if (arg > 0x7fff || arg < -0x8000)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & 0xffff;
+}
+
+static inline __cpuinit u32 build_uimm(u32 arg)
+{
+ if (arg & ~IMM_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & IMM_MASK;
+}
+
+static inline __cpuinit u32 build_bimm(s32 arg)
+{
+ if (arg > 0x1ffff || arg < -0x20000)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ if (arg & 0x3)
+ printk(KERN_WARNING "Invalid micro-assembler branch target\n");
+
+ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
+
+static inline __cpuinit u32 build_jimm(u32 arg)
+{
+ if (arg & ~((JIMM_MASK) << 2))
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg >> 2) & JIMM_MASK;
+}
+
+static inline __cpuinit u32 build_func(u32 arg)
+{
+ if (arg & ~FUNC_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & FUNC_MASK;
+}
+
+static inline __cpuinit u32 build_set(u32 arg)
+{
+ if (arg & ~SET_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & SET_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
+{
+ struct insn *ip = NULL;
+ unsigned int i;
+ va_list ap;
+ u32 op;
+
+ for (i = 0; insn_table[i].opcode != insn_invalid; i++)
+ if (insn_table[i].opcode == opc) {
+ ip = &insn_table[i];
+ break;
+ }
+
+ if (!ip)
+ panic("Unsupported Micro-assembler instruction %d", opc);
+
+ op = ip->match;
+ va_start(ap, opc);
+ if (ip->fields & RS)
+ op |= build_rs(va_arg(ap, u32));
+ if (ip->fields & RT)
+ op |= build_rt(va_arg(ap, u32));
+ if (ip->fields & RD)
+ op |= build_rd(va_arg(ap, u32));
+ if (ip->fields & RE)
+ op |= build_re(va_arg(ap, u32));
+ if (ip->fields & SIMM)
+ op |= build_simm(va_arg(ap, s32));
+ if (ip->fields & UIMM)
+ op |= build_uimm(va_arg(ap, u32));
+ if (ip->fields & BIMM)
+ op |= build_bimm(va_arg(ap, s32));
+ if (ip->fields & JIMM)
+ op |= build_jimm(va_arg(ap, u32));
+ if (ip->fields & FUNC)
+ op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET)
+ op |= build_set(va_arg(ap, u32));
+ va_end(ap);
+
+ **buf = op;
+ (*buf)++;
+}
+
+#define I_u1u2u3(op) \
+Ip_u1u2u3(op) \
+{ \
+ build_insn(buf, insn##op, a, b, c); \
+}
+
+#define I_u2u1u3(op) \
+Ip_u2u1u3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c); \
+}
+
+#define I_u3u1u2(op) \
+Ip_u3u1u2(op) \
+{ \
+ build_insn(buf, insn##op, b, c, a); \
+}
+
+#define I_u1u2s3(op) \
+Ip_u1u2s3(op) \
+{ \
+ build_insn(buf, insn##op, a, b, c); \
+}
+
+#define I_u2s3u1(op) \
+Ip_u2s3u1(op) \
+{ \
+ build_insn(buf, insn##op, c, a, b); \
+}
+
+#define I_u2u1s3(op) \
+Ip_u2u1s3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c); \
+}
+
+#define I_u1u2(op) \
+Ip_u1u2(op) \
+{ \
+ build_insn(buf, insn##op, a, b); \
+}
+
+#define I_u1s2(op) \
+Ip_u1s2(op) \
+{ \
+ build_insn(buf, insn##op, a, b); \
+}
+
+#define I_u1(op) \
+Ip_u1(op) \
+{ \
+ build_insn(buf, insn##op, a); \
+}
+
+#define I_0(op) \
+Ip_0(op) \
+{ \
+ build_insn(buf, insn##op); \
+}
+
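+/*
+ * Example: I_u2u1s3(_addiu) defines
+ *     uasm_i_addiu(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ * which calls build_insn(buf, insn_addiu, b, a, c), i.e. rs = b, rt = a,
+ * simm = c, so uasm_i_addiu(&p, K1, K0, 4) assembles "addiu k1, k0, 4".
+ */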
+I_u2u1s3(_addiu)
+I_u3u1u2(_addu)
+I_u2u1u3(_andi)
+I_u3u1u2(_and)
+I_u1u2s3(_beq)
+I_u1u2s3(_beql)
+I_u1s2(_bgez)
+I_u1s2(_bgezl)
+I_u1s2(_bltz)
+I_u1s2(_bltzl)
+I_u1u2s3(_bne)
+I_u2s3u1(_cache)
+I_u1u2u3(_dmfc0)
+I_u1u2u3(_dmtc0)
+I_u2u1s3(_daddiu)
+I_u3u1u2(_daddu)
+I_u2u1u3(_dsll)
+I_u2u1u3(_dsll32)
+I_u2u1u3(_dsra)
+I_u2u1u3(_dsrl)
+I_u2u1u3(_dsrl32)
+I_u3u1u2(_dsubu)
+I_0(_eret)
+I_u1(_j)
+I_u1(_jal)
+I_u1(_jr)
+I_u2s3u1(_ld)
+I_u2s3u1(_ll)
+I_u2s3u1(_lld)
+I_u1s2(_lui)
+I_u2s3u1(_lw)
+I_u1u2u3(_mfc0)
+I_u1u2u3(_mtc0)
+I_u2u1u3(_ori)
+I_u2s3u1(_pref)
+I_0(_rfe)
+I_u2s3u1(_sc)
+I_u2s3u1(_scd)
+I_u2s3u1(_sd)
+I_u2u1u3(_sll)
+I_u2u1u3(_sra)
+I_u2u1u3(_srl)
+I_u3u1u2(_subu)
+I_u2s3u1(_sw)
+I_0(_tlbp)
+I_0(_tlbwi)
+I_0(_tlbwr)
+I_u3u1u2(_xor)
+I_u2u1u3(_xori)
+
+/* Handle labels. */
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+{
+ (*lab)->addr = addr;
+ (*lab)->lab = lid;
+ (*lab)++;
+}
+
+int __cpuinit uasm_rel_hi(long val)
+{
+ return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
+}
+
+int __cpuinit uasm_rel_lo(long val)
+{
+ return ((val & 0xffff) ^ 0x8000) - 0x8000;
+}
+
+void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+{
+ uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+}
+
+void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+{
+ UASM_i_LA_mostly(buf, rs, addr);
+ if (uasm_rel_lo(addr))
+ uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
+}
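+
+/*
+ * uasm_rel_hi()/uasm_rel_lo() split an address so that
+ * (hi << 16) + (s16)lo reassembles it even when bit 15 of the low half
+ * is set.  For a hypothetical addr of 0x12349abc, rel_hi() is 0x1235 and
+ * rel_lo() is -0x6544, so UASM_i_LA() emits
+ * "lui reg, 0x1235; addiu reg, reg, -0x6544".
+ */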
+
+/* Handle relocations. */
+void __cpuinit
+uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+{
+ (*rel)->addr = addr;
+ (*rel)->type = R_MIPS_PC16;
+ (*rel)->lab = lid;
+ (*rel)++;
+}
+
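+/*
+ * An R_MIPS_PC16 fixup stores (label - (branch + 4)) >> 2 in the 16-bit
+ * immediate of the branch: e.g. a branch word at 0x80000010 resolved
+ * against a label at 0x80000020 (hypothetical addresses) ends up with an
+ * immediate of 3.
+ */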
+static inline void __cpuinit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ long laddr = (long)lab->addr;
+ long raddr = (long)rel->addr;
+
+ switch (rel->type) {
+ case R_MIPS_PC16:
+ *rel->addr |= build_bimm(laddr - (raddr + 4));
+ break;
+
+ default:
+ panic("Unsupported Micro-assembler relocation %d",
+ rel->type);
+ }
+}
+
+void __cpuinit
+uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ struct uasm_label *l;
+
+ for (; rel->lab != UASM_LABEL_INVALID; rel++)
+ for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
+ if (rel->lab == l->lab)
+ __resolve_relocs(rel, l);
+}
+
+void __cpuinit
+uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+{
+ for (; rel->lab != UASM_LABEL_INVALID; rel++)
+ if (rel->addr >= first && rel->addr < end)
+ rel->addr += off;
+}
+
+void __cpuinit
+uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
+{
+ for (; lab->lab != UASM_LABEL_INVALID; lab++)
+ if (lab->addr >= first && lab->addr < end)
+ lab->addr += off;
+}
+
+void __cpuinit
+uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
+ u32 *end, u32 *target)
+{
+ long off = (long)(target - first);
+
+ memcpy(target, first, (end - first) * sizeof(u32));
+
+ uasm_move_relocs(rel, first, end, off);
+ uasm_move_labels(lab, first, end, off);
+}
+
+int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+{
+ for (; rel->lab != UASM_LABEL_INVALID; rel++) {
+ if (rel->addr == addr
+ && (rel->type == R_MIPS_PC16
+ || rel->type == R_MIPS_26))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Convenience functions for labeled branches. */
+void __cpuinit
+uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bltz(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_b(p, 0);
+}
+
+void __cpuinit
+uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_beqz(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_beqzl(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ unsigned int reg2, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bne(p, reg1, reg2, 0);
+}
+
+void __cpuinit
+uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bnez(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bgezl(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bgez(p, reg, 0);
+}
diff --git a/target/linux/realtek/files/arch/rlx/mm/uasm.h b/target/linux/realtek/files/arch/rlx/mm/uasm.h
new file mode 100644
index 000000000..b1c5717b5
--- /dev/null
+++ b/target/linux/realtek/files/arch/rlx/mm/uasm.h
@@ -0,0 +1,166 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+
+#include <linux/types.h>
+
+#define Ip_u1u2u3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u2u1u3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u3u1u2(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u1u2s3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+
+#define Ip_u2s3u1(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
+
+#define Ip_u2u1s3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+
+#define Ip_u1u2(op) \
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+
+#define Ip_u1s2(op) \
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
+
+#define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
+
+#define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
+
+Ip_u2u1s3(_addiu);
+Ip_u3u1u2(_addu);
+Ip_u2u1u3(_andi);
+Ip_u3u1u2(_and);
+Ip_u1u2s3(_beq);
+Ip_u1u2s3(_beql);
+Ip_u1s2(_bgez);
+Ip_u1s2(_bgezl);
+Ip_u1s2(_bltz);
+Ip_u1s2(_bltzl);
+Ip_u1u2s3(_bne);
+Ip_u2s3u1(_cache);
+Ip_u1u2u3(_dmfc0);
+Ip_u1u2u3(_dmtc0);
+Ip_u2u1s3(_daddiu);
+Ip_u3u1u2(_daddu);
+Ip_u2u1u3(_dsll);
+Ip_u2u1u3(_dsll32);
+Ip_u2u1u3(_dsra);
+Ip_u2u1u3(_dsrl);
+Ip_u2u1u3(_dsrl32);
+Ip_u3u1u2(_dsubu);
+Ip_0(_eret);
+Ip_u1(_j);
+Ip_u1(_jal);
+Ip_u1(_jr);
+Ip_u2s3u1(_ld);
+Ip_u2s3u1(_ll);
+Ip_u2s3u1(_lld);
+Ip_u1s2(_lui);
+Ip_u2s3u1(_lw);
+Ip_u1u2u3(_mfc0);
+Ip_u1u2u3(_mtc0);
+Ip_u2u1u3(_ori);
+Ip_u2s3u1(_pref);
+Ip_0(_rfe);
+Ip_u2s3u1(_sc);
+Ip_u2s3u1(_scd);
+Ip_u2s3u1(_sd);
+Ip_u2u1u3(_sll);
+Ip_u2u1u3(_sra);
+Ip_u2u1u3(_srl);
+Ip_u3u1u2(_subu);
+Ip_u2s3u1(_sw);
+Ip_0(_tlbp);
+Ip_0(_tlbwi);
+Ip_0(_tlbwr);
+Ip_u3u1u2(_xor);
+Ip_u2u1u3(_xori);
+
+/* Handle labels. */
+struct uasm_label {
+ u32 *addr;
+ int lab;
+};
+
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+int uasm_rel_hi(long val);
+int uasm_rel_lo(long val);
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+
+#define UASM_L_LA(lb) \
+static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+{ \
+ uasm_build_label(lab, addr, label##lb); \
+}
+
+/* convenience macros for instructions */
+#define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
+#define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
+#define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
+#define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
+#define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
+#define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
+#define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
+#define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
+#define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
+#define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
+#define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
+#define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
+
+#define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off)
+#define uasm_i_beqz(buf, rs, off) uasm_i_beq(buf, rs, 0, off)
+#define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off)
+#define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off)
+#define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off)
+#define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b)
+#define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0)
+#define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)
+#define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
+
+/* Handle relocations. */
+struct uasm_reloc {
+ u32 *addr;
+ unsigned int type;
+ int lab;
+};
+
+/* This is zero so we can use zeroed label arrays. */
+#define UASM_LABEL_INVALID 0
+
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
+void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
+ u32 *first, u32 *end, u32 *target);
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
+
+/* Convenience functions for labeled branches. */
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ unsigned int reg2, int lid);
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);