Diffstat (limited to 'target/linux/realtek/files/arch/rlx/include')
170 files changed, 16197 insertions, 0 deletions
diff --git a/target/linux/realtek/files/arch/rlx/include/asm/Kbuild b/target/linux/realtek/files/arch/rlx/include/asm/Kbuild new file mode 100644 index 000000000..7897f05e3 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/Kbuild @@ -0,0 +1,3 @@ +include include/asm-generic/Kbuild.asm + +header-y += cachectl.h sgidefs.h sysmips.h diff --git a/target/linux/realtek/files/arch/rlx/include/asm/abi.h b/target/linux/realtek/files/arch/rlx/include/asm/abi.h new file mode 100644 index 000000000..1dd74fbdc --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/abi.h @@ -0,0 +1,25 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005, 06 by Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2005 MIPS Technologies, Inc. + */ +#ifndef _ASM_ABI_H +#define _ASM_ABI_H + +#include <asm/signal.h> +#include <asm/siginfo.h> + +struct mips_abi { + int (* const setup_frame)(struct k_sigaction * ka, + struct pt_regs *regs, int signr, + sigset_t *set); + int (* const setup_rt_frame)(struct k_sigaction * ka, + struct pt_regs *regs, int signr, + sigset_t *set, siginfo_t *info); + const unsigned long restart; +}; + +#endif /* _ASM_ABI_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/addrspace.h b/target/linux/realtek/files/arch/rlx/include/asm/addrspace.h new file mode 100644 index 000000000..22b11072a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/addrspace.h @@ -0,0 +1,116 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 99 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999 by Silicon Graphics, Inc. + */ +#ifndef _ASM_ADDRSPACE_H +#define _ASM_ADDRSPACE_H + +#include <spaces.h> + +/* + * Configure language + */ +#ifdef __ASSEMBLY__ +#define _ATYPE_ +#define _ATYPE32_ +#define _ATYPE64_ +#define _CONST64_(x) x +#else +#define _ATYPE_ __PTRDIFF_TYPE__ +#define _ATYPE32_ int +#define _ATYPE64_ __s64 +#define _CONST64_(x) x ## LL +#endif + +/* + * 32-bit MIPS address spaces + */ +#ifdef __ASSEMBLY__ +#define _ACAST32_ +#define _ACAST64_ +#else +#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */ +#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */ +#endif + +/* + * Returns the kernel segment base of a given address + */ +#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000) + +/* + * Returns the physical address of a CKSEGx / XKPHYS address + */ +#define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) +#define XPHYSADDR(a) ((_ACAST64_(a)) & \ + _CONST64_(0x000000ffffffffff)) + +#define CKSEG0ADDR(a) (CPHYSADDR(a) | KSEG0) +#define CKSEG1ADDR(a) (CPHYSADDR(a) | KSEG1) +#define CKSEG2ADDR(a) (CPHYSADDR(a) | KSEG2) +#define CKSEG3ADDR(a) (CPHYSADDR(a) | KSEG3) + +/* + * Map an address to a certain kernel segment + */ +#define KSEG0ADDR(a) (CPHYSADDR(a) | KSEG0) +#define KSEG1ADDR(a) (CPHYSADDR(a) | KSEG1) +#define KSEG2ADDR(a) (CPHYSADDR(a) | KSEG2) +#define KSEG3ADDR(a) (CPHYSADDR(a) | KSEG3) + +/* + * Memory segments (32bit kernel mode addresses) + * These are the traditional names used in the 32-bit universe. 
+ */ +#define KUSEG 0x00000000 +#define KSEG0 0x80000000 +#define KSEG1 0xa0000000 +#define KSEG2 0xc0000000 +#define KSEG3 0xe0000000 + +#define CKUSEG 0x00000000 +#define CKSEG0 0x80000000 +#define CKSEG1 0xa0000000 +#define CKSEG2 0xc0000000 +#define CKSEG3 0xe0000000 + +/* + * Cache modes for XKPHYS address conversion macros + */ +#define K_CALG_COH_EXCL1_NOL2 0 +#define K_CALG_COH_SHRL1_NOL2 1 +#define K_CALG_UNCACHED 2 +#define K_CALG_NONCOHERENT 3 +#define K_CALG_COH_EXCL 4 +#define K_CALG_COH_SHAREABLE 5 +#define K_CALG_NOTUSED 6 +#define K_CALG_UNCACHED_ACCEL 7 + +/* + * 64-bit address conversions + */ +#define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p)) +#define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p)) +#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK) +#define PHYS_TO_XKPHYS(cm, a) (_CONST64_(0x8000000000000000) | \ + (_CONST64_(cm) << 59) | (a)) + +/* + * The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting + * the region, 3 bits for the CCA mode. This leaves 59 bits of which the + * R8000 implements most with its 48-bit physical address space. + */ +#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */ + +#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000) +#define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */ + +#define KDM_TO_PHYS(x) (_ACAST64_ (x) & TO_PHYS_MASK) +#define PHYS_TO_K0(x) (_ACAST64_ (x) | CAC_BASE) + +#endif /* _ASM_ADDRSPACE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/asm.h b/target/linux/realtek/files/arch/rlx/include/asm/asm.h new file mode 100644 index 000000000..109857798 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/asm.h @@ -0,0 +1,258 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle + * Copyright (C) 1999 by Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + * Copyright (C) 2002 Maciej W. Rozycki + * + * Some useful macros for MIPS assembler code + * + * Some of the routines below contain useless nops that will be optimized + * away by gas in -O mode. These nops are however required to fill delay + * slots in noreorder mode. + */ +#ifndef __ASM_ASM_H +#define __ASM_ASM_H + +#include <asm/sgidefs.h> + +#ifndef CAT +#ifdef __STDC__ +#define __CAT(str1, str2) str1##str2 +#else +#define __CAT(str1, str2) str1/**/str2 +#endif +#define CAT(str1, str2) __CAT(str1, str2) +#endif + +/* + * PIC specific declarations + * Not used for the kernel but here seems to be the right place. 
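An illustrative sketch (not part of the original patch) of how the segment macros from addrspace.h above are commonly used: a physical device-register address is mapped to an uncached KSEG1 virtual address with KSEG1ADDR(), and CPHYSADDR() strips the segment bits again. The register address and function names below are hypothetical.

#include <asm/addrspace.h>

#define EXAMPLE_REG_PHYS 0x18000000UL	/* hypothetical device register */

static inline unsigned long example_reg_uncached(void)
{
	/* 0x18000000 | KSEG1 (0xa0000000) == 0xb8000000, uncached access */
	return KSEG1ADDR(EXAMPLE_REG_PHYS);
}

static inline unsigned long example_back_to_phys(unsigned long vaddr)
{
	/* mask with 0x1fffffff: 0xb8000000 -> 0x18000000 */
	return CPHYSADDR(vaddr);
}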
+ */ +#ifdef __PIC__ +#define CPRESTORE(register) \ + .cprestore register +#define CPADD(register) \ + .cpadd register +#define CPLOAD(register) \ + .cpload register +#else +#define CPRESTORE(register) +#define CPADD(register) +#define CPLOAD(register) +#endif + +/* + * LEAF - declare leaf routine + */ +#define LEAF(symbol) \ + .globl symbol; \ + .align 2; \ + .type symbol, @function; \ + .ent symbol, 0; \ +symbol: .frame sp, 0, ra + +/* + * NESTED - declare nested routine entry point + */ +#define NESTED(symbol, framesize, rpc) \ + .globl symbol; \ + .align 2; \ + .type symbol, @function; \ + .ent symbol, 0; \ +symbol: .frame sp, framesize, rpc + +/* + * END - mark end of function + */ +#define END(function) \ + .end function; \ + .size function, .-function + +/* + * EXPORT - export definition of symbol + */ +#define EXPORT(symbol) \ + .globl symbol; \ +symbol: + +/* + * FEXPORT - export definition of a function symbol + */ +#define FEXPORT(symbol) \ + .globl symbol; \ + .type symbol, @function; \ +symbol: + +/* + * ABS - export absolute symbol + */ +#define ABS(symbol,value) \ + .globl symbol; \ +symbol = value + +#define PANIC(msg) \ + .set push; \ + .set reorder; \ + PTR_LA a0, 8f; \ + jal panic; \ +9: b 9b; \ + .set pop; \ + TEXT(msg) + +/* + * Print formatted string + */ +#ifdef CONFIG_PRINTK +#define PRINT(string) \ + .set push; \ + .set reorder; \ + PTR_LA a0, 8f; \ + jal printk; \ + .set pop; \ + TEXT(string) +#else +#define PRINT(string) +#endif + +#define TEXT(msg) \ + .pushsection .data; \ +8: .asciiz msg; \ + .popsection; + +/* + * Build text tables + */ +#define TTABLE(string) \ + .pushsection .text; \ + .word 1f; \ + .popsection \ + .pushsection .data; \ +1: .asciiz string; \ + .popsection + +#if !defined(__m4180) +#define MOVN(rd, rs, rt) \ + movn rd, rs, rt +#define MOVZ(rd, rs, rt) \ + movz rd, rs, rt +#else +#define MOVN(rd,rs,rt) \ + .set push; \ + .set reorder; \ + beqz rt,9f; \ + move rd,rs; \ + .set pop; \ +9: +#define MOVZ(rd,rs,rt) \ + .set push; \ + .set reorder; \ + bnez rt,9f; \ + move rd,rs; \ + .set pop; \ +9: +#endif + +/* + * Stack alignment + */ +#define ALSZ 7 +#define ALMASK ~7 + +/* + * Macros to handle different pointer/register sizes for 32/64-bit code + */ + +/* + * Size of a register + */ +#define SZREG 4 + +/* + * Use the following macros in assemblercode to load/store registers, + * pointers etc. + */ +#define REG_S sw +#define REG_L lw +#define REG_SUBU subu +#define REG_ADDU addu + +/* + * How to add/sub/load/store/shift C int variables. + */ +#define INT_ADD add +#define INT_ADDU addu +#define INT_ADDI addi +#define INT_ADDIU addiu +#define INT_SUB sub +#define INT_SUBU subu +#define INT_L lw +#define INT_S sw +#define INT_SLL sll +#define INT_SLLV sllv +#define INT_SRL srl +#define INT_SRLV srlv +#define INT_SRA sra +#define INT_SRAV srav + +/* + * How to add/sub/load/store/shift C long variables. + */ +#define LONG_ADD add +#define LONG_ADDU addu +#define LONG_ADDI addi +#define LONG_ADDIU addiu +#define LONG_SUB sub +#define LONG_SUBU subu +#define LONG_L lw +#define LONG_S sw +#define LONG_SLL sll +#define LONG_SLLV sllv +#define LONG_SRL srl +#define LONG_SRLV srlv +#define LONG_SRA sra +#define LONG_SRAV srav + +#define LONG .word +#define LONGSIZE 4 +#define LONGMASK 3 +#define LONGLOG 2 + +/* + * How to add/sub/load/store/shift pointers. 
+ */ +#define PTR_ADD add +#define PTR_ADDU addu +#define PTR_ADDI addi +#define PTR_ADDIU addiu +#define PTR_SUB sub +#define PTR_SUBU subu +#define PTR_L lw +#define PTR_S sw +#define PTR_LA la +#define PTR_LI li +#define PTR_SLL sll +#define PTR_SLLV sllv +#define PTR_SRL srl +#define PTR_SRLV srlv +#define PTR_SRA sra +#define PTR_SRAV srav + +#define PTR_SCALESHIFT 2 + +#define PTR .word +#define PTRSIZE 4 +#define PTRLOG 2 + +/* + * Some cp0 registers were extended to 64bit for MIPS III. + */ +#define MFC0 mfc0 +#define MTC0 mtc0 + +#define SSNOP sll zero, zero, 1 + +#endif /* __ASM_ASM_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/asmmacro-32.h b/target/linux/realtek/files/arch/rlx/include/asm/asmmacro-32.h new file mode 100644 index 000000000..055c1c7ca --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/asmmacro-32.h @@ -0,0 +1,41 @@ +/* + * asmmacro.h: Assembler macros to make things easier to read. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + * Copyright (C) 1998, 1999, 2003 Ralf Baechle + */ +#ifndef _ASM_ASMMACRO_32_H +#define _ASM_ASMMACRO_32_H + +#include <asm/asm-offsets.h> +#include <asm/regdef.h> +#include <asm/rlxregs.h> + + .macro cpu_save_nonscratch thread + LONG_S s0, THREAD_REG16(\thread) + LONG_S s1, THREAD_REG17(\thread) + LONG_S s2, THREAD_REG18(\thread) + LONG_S s3, THREAD_REG19(\thread) + LONG_S s4, THREAD_REG20(\thread) + LONG_S s5, THREAD_REG21(\thread) + LONG_S s6, THREAD_REG22(\thread) + LONG_S s7, THREAD_REG23(\thread) + LONG_S sp, THREAD_REG29(\thread) + LONG_S fp, THREAD_REG30(\thread) + .endm + + .macro cpu_restore_nonscratch thread + LONG_L s0, THREAD_REG16(\thread) + LONG_L s1, THREAD_REG17(\thread) + LONG_L s2, THREAD_REG18(\thread) + LONG_L s3, THREAD_REG19(\thread) + LONG_L s4, THREAD_REG20(\thread) + LONG_L s5, THREAD_REG21(\thread) + LONG_L s6, THREAD_REG22(\thread) + LONG_L s7, THREAD_REG23(\thread) + LONG_L sp, THREAD_REG29(\thread) + LONG_L fp, THREAD_REG30(\thread) + LONG_L ra, THREAD_REG31(\thread) + .endm + +#endif /* _ASM_ASMMACRO_32_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/asmmacro.h b/target/linux/realtek/files/arch/rlx/include/asm/asmmacro.h new file mode 100644 index 000000000..3583b9f78 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/asmmacro.h @@ -0,0 +1,29 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Ralf Baechle + */ +#ifndef _ASM_ASMMACRO_H +#define _ASM_ASMMACRO_H + +#include <asm/hazards.h> +#include <asm/asmmacro-32.h> + + .macro local_irq_enable reg=t0 + mfc0 \reg, CP0_STATUS + ori \reg, \reg, 1 + mtc0 \reg, CP0_STATUS + irq_enable_hazard + .endm + + .macro local_irq_disable reg=t0 + mfc0 \reg, CP0_STATUS + ori \reg, \reg, 1 + xori \reg, \reg, 1 + mtc0 \reg, CP0_STATUS + irq_disable_hazard + .endm + +#endif /* _ASM_ASMMACRO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/atomic.h b/target/linux/realtek/files/arch/rlx/include/asm/atomic.h new file mode 100644 index 000000000..b042aa26b --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/atomic.h @@ -0,0 +1,360 @@ +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + * + * But use these as seldom as possible since they are much more slower + * than regular operations. 
+ * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle + */ +#ifndef _ASM_ATOMIC_H +#define _ASM_ATOMIC_H + +#include <linux/irqflags.h> +#include <linux/types.h> +#include <linux/linkage.h> +#include <asm/barrier.h> +#include <asm/cpu-features.h> +#include <asm/system.h> + +#define ATOMIC_INIT(i) { (i) } + +/* + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +#define atomic_read(v) ((v)->counter) + +/* + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +#define atomic_set(v, i) ((v)->counter = (i)) + +/* + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v. + */ +static __inline__ void atomic_add(int i, atomic_t * v) +{ +#ifdef CONFIG_CPU_HAS_LLSC + int temp; + + smp_llsc_mb(); + + __asm__ __volatile__( + " .set mips3 \n" + "1: ll %0, %1 # atomic_add \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " addu %0, %2 \n" + " sc %0, %1 \n" + " beqz %0, 2f \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + " .set mips0 \n" + : "=&r" (temp), "=m" (v->counter) + : "Ir" (i), "m" (v->counter)); + + smp_llsc_mb(); +#else + unsigned long flags; + + raw_local_irq_save(flags); + v->counter += i; + raw_local_irq_restore(flags); +#endif +} + +/* + * atomic_sub - subtract the atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v. 
+ */ +static __inline__ void atomic_sub(int i, atomic_t * v) +{ +#ifdef CONFIG_CPU_HAS_LLSC + int temp; + + smp_llsc_mb(); + + __asm__ __volatile__( + "1: ll %0, %1 # atomic_sub \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " subu %0, %2 \n" + " sc %0, %1 \n" + " beqz %0, 2f \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + : "=&r" (temp), "=m" (v->counter) + : "Ir" (i), "m" (v->counter)); + + smp_llsc_mb(); +#else + unsigned long flags; + + raw_local_irq_save(flags); + v->counter -= i; + raw_local_irq_restore(flags); +#endif +} + +/* + * Same as above, but return the result value + */ +static __inline__ int atomic_add_return(int i, atomic_t * v) +{ +#ifdef CONFIG_CPU_HAS_LLSC + int result; + int temp; + smp_llsc_mb(); + + __asm__ __volatile__( + "1: ll %1, %2 # atomic_add_return \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " addu %0, %1, %3 \n" + " sc %0, %2 \n" + " beqz %0, 2f \n" + " addu %0, %1, %3 \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + : "=&r" (result), "=&r" (temp), "=m" (v->counter) + : "Ir" (i), "m" (v->counter) + : "memory"); + + smp_llsc_mb(); +#else + int result; + unsigned long flags; + + raw_local_irq_save(flags); + result = v->counter; + result += i; + v->counter = result; + raw_local_irq_restore(flags); +#endif + + return result; +} + +static __inline__ int atomic_sub_return(int i, atomic_t * v) +{ +#ifdef CONFIG_CPU_HAS_LLSC + int result; + int temp; + + smp_llsc_mb(); + + __asm__ __volatile__( + "1: ll %1, %2 # atomic_sub_return \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " subu %0, %1, %3 \n" + " sc %0, %2 \n" + " beqz %0, 2f \n" + " subu %0, %1, %3 \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + : "=&r" (result), "=&r" (temp), "=m" (v->counter) + : "Ir" (i), "m" (v->counter) + : "memory"); + + smp_llsc_mb(); +#else + unsigned long flags; + int result; + + raw_local_irq_save(flags); + result = v->counter; + result -= i; + v->counter = result; + raw_local_irq_restore(flags); + +#endif + + return result; +} + +/* + * atomic_sub_if_positive - conditionally subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically test @v and subtract @i if @v is greater or equal than @i. + * The function returns the old value of @v minus @i. 
+ */ +static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) +{ +#ifdef CONFIG_CPU_HAS_LLSC + int result; + int temp; + + smp_llsc_mb(); + + __asm__ __volatile__( + "1: ll %1, %2 # atomic_sub_if_positive\n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " subu %0, %1, %3 \n" + " bltz %0, 1f \n" + " sc %0, %2 \n" + " .set noreorder \n" + " beqz %0, 2f \n" + " subu %0, %1, %3 \n" + " .set reorder \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + "1: \n" + : "=&r" (result), "=&r" (temp), "=m" (v->counter) + : "Ir" (i), "m" (v->counter) + : "memory"); + + smp_llsc_mb(); +#else + unsigned long flags; + int result; + + raw_local_irq_save(flags); + result = v->counter; + result -= i; + if (result >= 0) + v->counter = result; + raw_local_irq_restore(flags); +#endif + + return result; +} + +#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) +#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) + +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +#define atomic_dec_return(v) atomic_sub_return(1, (v)) +#define atomic_inc_return(v) atomic_add_return(1, (v)) + +/* + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) + +/* + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) + +/* + * atomic_dec_and_test - decrement by 1 and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) + +/* + * atomic_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + */ +#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) + +/* + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1. + */ +#define atomic_inc(v) atomic_add(1, (v)) + +/* + * atomic_dec - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. + */ +#define atomic_dec(v) atomic_sub(1, (v)) + +/* + * atomic_add_negative - add and test if negative + * @v: pointer of type atomic_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) + +/* + * atomic*_return operations are serializing but not the non-*_return + * versions. 
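A minimal usage sketch (not part of the original patch) for the atomic counter API defined above: atomic_dec_if_positive() only commits the decrement when the result stays non-negative and returns the decremented value, so a negative return means the pool was already empty. The token-pool names below are hypothetical.

#include <asm/atomic.h>

static atomic_t example_tokens = ATOMIC_INIT(4);	/* hypothetical token pool */

static int example_take_token(void)
{
	/* returns 1 if a token was taken, 0 if the pool was empty */
	return atomic_dec_if_positive(&example_tokens) >= 0;
}

static void example_put_token(void)
{
	atomic_inc(&example_tokens);
}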
+ */ +#define smp_mb__before_atomic_dec() smp_llsc_mb() +#define smp_mb__after_atomic_dec() smp_llsc_mb() +#define smp_mb__before_atomic_inc() smp_llsc_mb() +#define smp_mb__after_atomic_inc() smp_llsc_mb() + +#include <asm-generic/atomic-long.h> + +#endif /* _ASM_ATOMIC_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/auxvec.h b/target/linux/realtek/files/arch/rlx/include/asm/auxvec.h new file mode 100644 index 000000000..7cf7f2d21 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/auxvec.h @@ -0,0 +1,4 @@ +#ifndef _ASM_AUXVEC_H +#define _ASM_AUXVEC_H + +#endif /* _ASM_AUXVEC_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/barrier.h b/target/linux/realtek/files/arch/rlx/include/asm/barrier.h new file mode 100644 index 000000000..0d897ceae --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/barrier.h @@ -0,0 +1,134 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +/* + * read_barrier_depends - Flush all pending reads that subsequents reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data return by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). + * + * These ordering constraints are respected by both the local CPU + * and the compiler. + * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. + * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * <programlisting> + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * </programlisting> + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). However, + * the following code, with the same initial values for "a" and "b": + * + * <programlisting> + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * </programlisting> + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies. 
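A C rendering (not part of the original patch) of the pointer-publication pattern described in the comment above. It assumes the producer pairs the publication with a write barrier (smp_wmb() here, where the comment shows memory_barrier()); on the consumer side read_barrier_depends() orders the dependent dereference after the pointer load. All names are hypothetical.

#include <asm/barrier.h>

static int example_data = 1;
static int *example_ptr;	/* hypothetical shared pointer, NULL until published */

/* producer side, e.g. CPU 0 */
static void example_publish(void)
{
	example_data = 2;
	smp_wmb();			/* order the data store before the pointer store */
	example_ptr = &example_data;
}

/* consumer side, e.g. CPU 1 */
static int example_consume(void)
{
	int *q = example_ptr;

	if (!q)
		return -1;		/* not published yet */
	read_barrier_depends();		/* data-dependent read, lighter than rmb() */
	return *q;			/* observes 2 once the pointer is visible */
}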
+ */ + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#ifdef CONFIG_CPU_HAS_SYNC +#define __sync() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + "sync\n\t" \ + ".set pop" \ + : /* no output */ \ + : /* no input */ \ + : "memory") +#else +#define __sync() do { } while(0) +#endif + +#define __fast_iob() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + "lw $0,%0\n\t" \ + "nop\n\t" \ + ".set pop" \ + : /* no output */ \ + : "m" (*(int *)CKSEG1) \ + : "memory") + +#define fast_wmb() __sync() +#define fast_rmb() __sync() +#define fast_mb() __sync() +#define fast_iob() \ + do { \ + __sync(); \ + __fast_iob(); \ + } while (0) + +#if defined(CONFIG_CPU_HAS_WB) + +#include <asm/wbflush.h> + +#define wmb() fast_wmb() +#define rmb() fast_rmb() +#define mb() wbflush() +#define iob() wbflush() + +#else /* !CONFIG_CPU_HAS_WB */ + +#define wmb() fast_wmb() +#define rmb() fast_rmb() +#define mb() fast_mb() +#define iob() fast_iob() + +#endif /* !CONFIG_CPU_HAS_WB */ + +/* tonywu: no barrier, let the CPU roll */ + +#define __WEAK_ORDERING_MB " \n" +#define __WEAK_LLSC_MB " \n" + +#define smp_mb() __asm__ __volatile__(__WEAK_ORDERING_MB : : : "memory") +#define smp_rmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : : "memory") +#define smp_wmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : : "memory") + +#define set_mb(var, value) \ + do { var = value; smp_mb(); } while (0) + +#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : : "memory") +#define smp_llsc_rmb() __asm__ __volatile__(__WEAK_LLSC_MB : : : "memory") +#define smp_llsc_wmb() __asm__ __volatile__(__WEAK_LLSC_MB : : : "memory") + +#endif /* __ASM_BARRIER_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/bcache.h b/target/linux/realtek/files/arch/rlx/include/asm/bcache.h new file mode 100644 index 000000000..0ba9d6ef7 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/bcache.h @@ -0,0 +1,60 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1997, 1999 by Ralf Baechle + * Copyright (c) 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_BCACHE_H +#define _ASM_BCACHE_H + + +/* Some R4000 / R4400 / R4600 / R5000 machines may have a non-dma-coherent, + chipset implemented caches. On machines with other CPUs the CPU does the + cache thing itself. */ +struct bcache_ops { + void (*bc_enable)(void); + void (*bc_disable)(void); + void (*bc_wback_inv)(unsigned long page, unsigned long size); + void (*bc_inv)(unsigned long page, unsigned long size); +}; + +extern void indy_sc_init(void); + +#ifdef CONFIG_BOARD_SCACHE + +extern struct bcache_ops *bcops; + +static inline void bc_enable(void) +{ + bcops->bc_enable(); +} + +static inline void bc_disable(void) +{ + bcops->bc_disable(); +} + +static inline void bc_wback_inv(unsigned long page, unsigned long size) +{ + bcops->bc_wback_inv(page, size); +} + +static inline void bc_inv(unsigned long page, unsigned long size) +{ + bcops->bc_inv(page, size); +} + +#else /* !defined(CONFIG_BOARD_SCACHE) */ + +/* Not R4000 / R4400 / R4600 / R5000. 
*/ + +#define bc_enable() do { } while (0) +#define bc_disable() do { } while (0) +#define bc_wback_inv(page, size) do { } while (0) +#define bc_inv(page, size) do { } while (0) + +#endif /* !defined(CONFIG_BOARD_SCACHE) */ + +#endif /* _ASM_BCACHE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/bitops.h b/target/linux/realtek/files/arch/rlx/include/asm/bitops.h new file mode 100644 index 000000000..d57b11ef6 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/bitops.h @@ -0,0 +1,567 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) + * Copyright (c) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_BITOPS_H +#define _ASM_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <linux/compiler.h> +#include <linux/irqflags.h> +#include <linux/types.h> +#include <asm/barrier.h> +#include <asm/bug.h> +#include <asm/byteorder.h> /* sigh ... */ +#include <asm/cpu-features.h> +#include <asm/sgidefs.h> + +#define SZLONG_LOG 5 +#define SZLONG_MASK 31UL + +/* + * clear_bit() doesn't provide any barrier for the compiler. + */ +#define smp_mb__before_clear_bit() smp_llsc_mb() +#define smp_mb__after_clear_bit() smp_llsc_mb() + +/* + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(unsigned long nr, volatile unsigned long *addr) +{ +#ifdef CONFIG_CPU_HAS_LLSC + unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); + unsigned short bit = nr & SZLONG_MASK; + unsigned long temp; + + __asm__ __volatile__( + "1: ll %0, %1 # set_bit \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " or %0, %2 \n" + " sc %0, %1 \n" + " beqz %0, 2f \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + : "=&r" (temp), "=m" (*m) + : "ir" (1UL << bit), "m" (*m)); +#else + volatile unsigned long *a = addr; + unsigned short bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a |= mask; + raw_local_irq_restore(flags); +#endif +} + +/* + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. 
+ */ +static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) +{ +#ifdef CONFIG_CPU_HAS_LLSC + unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); + unsigned short bit = nr & SZLONG_MASK; + unsigned long temp; + + __asm__ __volatile__( + "1: ll %0, %1 # clear_bit \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " and %0, %2 \n" + " sc %0, %1 \n" + " beqz %0, 2f \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + : "=&r" (temp), "=m" (*m) + : "ir" (~(1UL << bit)), "m" (*m)); +#else + volatile unsigned long *a = addr; + unsigned short bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a &= ~mask; + raw_local_irq_restore(flags); +#endif +} + +/* + * clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and implies release semantics before the memory + * operation. It can be used for an unlock. + */ +static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) +{ + smp_mb__before_clear_bit(); + clear_bit(nr, addr); +} + +/* + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned short bit = nr & SZLONG_MASK; + +#ifdef CONFIG_CPU_HAS_LLSC + unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); + unsigned long temp; + + __asm__ __volatile__( + "1: ll %0, %1 # change_bit \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " xor %0, %2 \n" + " sc %0, %1 \n" + " beqz %0, 2f \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + : "=&r" (temp), "=m" (*m) + : "ir" (1UL << bit), "m" (*m)); +#else + volatile unsigned long *a = addr; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a ^= mask; + raw_local_irq_restore(flags); +#endif +} + +/* + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. 
+ */ +static inline int test_and_set_bit(unsigned long nr, + volatile unsigned long *addr) +{ +#ifdef CONFIG_CPU_HAS_LLSC + unsigned short bit = nr & SZLONG_MASK; + unsigned long res; + unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); + unsigned long temp; + + smp_llsc_mb(); + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + ".align 2\n 1: ll %0, %1 # test_and_set_bit \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + + " nop \n" +#endif + " or %2, %0, %3 \n" + " sc %2, %1 \n" + " beqz %2, 2f \n" + " and %2, %0, %3 \n" + " .subsection 2 \n" + ".align 2\n 2: b 1b \n" + " nop \n" + " .previous \n" + " .set pop \n" + : "=&r" (temp), "=m" (*m), "=&r" (res) + : "r" (1UL << bit), "m" (*m) + : "memory"); + + smp_llsc_mb(); +#else + volatile unsigned long *a = addr; + unsigned short bit = nr & SZLONG_MASK; + unsigned long res; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a |= mask; + raw_local_irq_restore(flags); +#endif + + return res != 0; +} + +/* + * test_and_set_bit_lock - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and implies acquire ordering semantics + * after the memory operation. + */ +static inline int test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *addr) +{ +#ifdef CONFIG_CPU_HAS_LLSC + unsigned short bit = nr & SZLONG_MASK; + unsigned long res; + unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); + unsigned long temp; + + smp_llsc_mb(); + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + "1: ll %0, %1 # test_and_set_bit \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + + " nop \n" +#endif + " or %2, %0, %3 \n" + " sc %2, %1 \n" + " beqz %2, 2f \n" + " and %2, %0, %3 \n" + " .subsection 2 \n" + "2: b 1b \n" + " nop \n" + " .previous \n" + " .set pop \n" + : "=&r" (temp), "=m" (*m), "=&r" (res) + : "r" (1UL << bit), "m" (*m) + : "memory"); + + smp_llsc_mb(); +#else + volatile unsigned long *a = addr; + unsigned short bit = nr & SZLONG_MASK; + unsigned long res; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a |= mask; + raw_local_irq_restore(flags); +#endif + + return res != 0; +} +/* + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. 
+ */ +static inline int test_and_clear_bit(unsigned long nr, + volatile unsigned long *addr) +{ +#ifdef CONFIG_CPU_HAS_LLSC + unsigned short bit = nr & SZLONG_MASK; + unsigned long res; + unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); + unsigned long temp; + + smp_llsc_mb(); + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + "1: ll %0, %1 # test_and_clear_bit \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + + " nop \n" +#endif + " or %2, %0, %3 \n" + " xor %2, %3 \n" + " sc %2, %1 \n" + " beqz %2, 2f \n" + " and %2, %0, %3 \n" + " .subsection 2 \n" + "2: b 1b \n" + " nop \n" + " .previous \n" + " .set pop \n" + : "=&r" (temp), "=m" (*m), "=&r" (res) + : "r" (1UL << bit), "m" (*m) + : "memory"); + + smp_llsc_mb(); +#else + unsigned short bit = nr & SZLONG_MASK; + unsigned long res; + volatile unsigned long *a = addr; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a &= ~mask; + raw_local_irq_restore(flags); +#endif + + return res != 0; +} + +/* + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_change_bit(unsigned long nr, + volatile unsigned long *addr) +{ +#ifdef CONFIG_CPU_HAS_LLSC + unsigned short bit = nr & SZLONG_MASK; + unsigned long res; + unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); + unsigned long temp; + + smp_llsc_mb(); + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + "1: ll %0, %1 # test_and_change_bit \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + + " nop \n" +#endif + " xor %2, %0, %3 \n" + " sc %2, %1 \n" + " beqz %2, 2f \n" + " and %2, %0, %3 \n" + " .subsection 2 \n" + "2: b 1b \n" + " nop \n" + " .previous \n" + " .set pop \n" + : "=&r" (temp), "=m" (*m), "=&r" (res) + : "r" (1UL << bit), "m" (*m) + : "memory"); + + smp_llsc_mb(); +#else + unsigned short bit = nr & SZLONG_MASK; + unsigned long res; + volatile unsigned long *a = addr; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a ^= mask; + raw_local_irq_restore(flags); + +#endif + + return res != 0; +} + +#include <asm-generic/bitops/non-atomic.h> + +/* + * __clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * __clear_bit() is non-atomic and implies release semantics before the memory + * operation. It can be used for an unlock if no other CPUs can concurrently + * modify other bits in the word. 
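An illustrative sketch (not part of the original patch) of the bit-lock idiom these operations are designed for: test_and_set_bit_lock() acquires with the required ordering, and clear_bit_unlock() releases. The flag word and bit number below are hypothetical.

#include <linux/bitops.h>

#define EXAMPLE_LOCK_BIT 0	/* hypothetical flag bit */

static unsigned long example_flags;

static int example_trylock(void)
{
	/* an old value of 0 means we took the lock */
	return !test_and_set_bit_lock(EXAMPLE_LOCK_BIT, &example_flags);
}

static void example_unlock(void)
{
	clear_bit_unlock(EXAMPLE_LOCK_BIT, &example_flags);
}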
+ */ +static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) +{ + smp_mb(); + __clear_bit(nr, addr); +} + +/* + * Return the bit position (0..63) of the most significant 1 bit in a word + * Returns -1 if no 1 bit exists + */ +static inline unsigned long __fls(unsigned long word) +{ + int num; + +#if 0 /*CONFIG_CPU_HAS_RADIAX*/ + if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) + { + __asm__( + " .set push \n" + " .set mips32 \n" + " clz %0, %1 \n" + " .set pop \n" + : "=r" (num) + : "r" (word)); + + return 31 - num; + } +#endif + + num = BITS_PER_LONG - 1; + + if (!(word & (~0ul << (BITS_PER_LONG-16)))) { + num -= 16; + word <<= 16; + } + if (!(word & (~0ul << (BITS_PER_LONG-8)))) { + num -= 8; + word <<= 8; + } + if (!(word & (~0ul << (BITS_PER_LONG-4)))) { + num -= 4; + word <<= 4; + } + if (!(word & (~0ul << (BITS_PER_LONG-2)))) { + num -= 2; + word <<= 2; + } + if (!(word & (~0ul << (BITS_PER_LONG-1)))) + num -= 1; + return num; +} + +/* + * __ffs - find first bit in word. + * @word: The word to search + * + * Returns 0..SZLONG-1 + * Undefined if no bit exists, so code should check against 0 first. + */ +static inline unsigned long __ffs(unsigned long word) +{ + return __fls(word & -word); +} + +/* + * fls - find last bit set. + * @word: The word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static inline int fls(int x) +{ + int r; + +#if 0 /*CONFIG_CPU_HAS_RADIAX*/ + if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { + __asm__("clz %0, %1" : "=r" (x) : "r" (x)); + + return 32 - x; + } +#endif + + r = 32; + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +#include <asm-generic/bitops/fls64.h> + +/* + * ffs - find first bit set. + * @word: The word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static inline int ffs(int word) +{ + if (!word) + return 0; + + return fls(word & -word); +} + +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/find.h> + +#ifdef __KERNEL__ + +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/ext2-non-atomic.h> +#include <asm-generic/bitops/ext2-atomic.h> +#include <asm-generic/bitops/minix.h> + +#endif /* __KERNEL__ */ + +#endif /* _ASM_BITOPS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/bitsperlong.h b/target/linux/realtek/files/arch/rlx/include/asm/bitsperlong.h new file mode 100644 index 000000000..3e4c10a8e --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/bitsperlong.h @@ -0,0 +1,8 @@ +#ifndef __ASM_MIPS_BITSPERLONG_H +#define __ASM_MIPS_BITSPERLONG_H + +#define __BITS_PER_LONG _MIPS_SZLONG + +#include <asm-generic/bitsperlong.h> + +#endif /* __ASM_MIPS_BITSPERLONG_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/bootinfo.h b/target/linux/realtek/files/arch/rlx/include/asm/bootinfo.h new file mode 100644 index 000000000..214a6420a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/bootinfo.h @@ -0,0 +1,77 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file COPYING in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1996, 2003 by Ralf Baechle + * Copyright (C) 1995, 1996 Andreas Busse + * Copyright (C) 1995, 1996 Stoned Elipot + * Copyright (C) 1995, 1996 Paul M. Antoine. + */ +#ifndef _ASM_BOOTINFO_H +#define _ASM_BOOTINFO_H + +#include <linux/types.h> +#include <asm/setup.h> + +/* + * The MACH_ IDs are sort of equivalent to PCI product IDs. As such the + * numbers do not necessarily reflect technical relations or similarities + * between systems. + */ + +/* + * Valid machtype values for group unknown + */ +#define MACH_UNKNOWN 0 /* whatever... */ +#define CL_SIZE COMMAND_LINE_SIZE + +extern char *system_type; +const char *get_system_type(void); + +extern unsigned long mips_machtype; + +#define BOOT_MEM_MAP_MAX 32 +#define BOOT_MEM_RAM 1 +#define BOOT_MEM_ROM_DATA 2 +#define BOOT_MEM_RESERVED 3 + +/* + * A memory map that's built upon what was determined + * or specified on the command line. + */ +struct boot_mem_map { + int nr_map; + struct boot_mem_map_entry { + phys_t addr; /* start of memory segment */ + phys_t size; /* size of memory segment */ + long type; /* type of memory segment */ + } map[BOOT_MEM_MAP_MAX]; +}; + +extern struct boot_mem_map boot_mem_map; + +extern void add_memory_region(phys_t start, phys_t size, long type); + +extern void bsp_init(void); +extern void bsp_free_prom_memory(void); + +extern void free_init_pages(const char *what, + unsigned long begin, unsigned long end); + +/* + * Initial kernel command line, usually setup by bsp_init() + */ +extern char arcs_cmdline[CL_SIZE]; + +/* + * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware + */ +extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; + +/* + * Platform memory detection hook called by setup_arch + */ +extern void bsp_setup(void); + +#endif /* _ASM_BOOTINFO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/branch.h b/target/linux/realtek/files/arch/rlx/include/asm/branch.h new file mode 100644 index 000000000..37c6857c8 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/branch.h @@ -0,0 +1,38 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1997, 1998, 2001 by Ralf Baechle + */ +#ifndef _ASM_BRANCH_H +#define _ASM_BRANCH_H + +#include <asm/ptrace.h> + +static inline int delay_slot(struct pt_regs *regs) +{ + return regs->cp0_cause & CAUSEF_BD; +} + +static inline unsigned long exception_epc(struct pt_regs *regs) +{ + if (!delay_slot(regs)) + return regs->cp0_epc; + + return regs->cp0_epc + 4; +} + +extern int __compute_return_epc(struct pt_regs *regs); + +static inline int compute_return_epc(struct pt_regs *regs) +{ + if (!delay_slot(regs)) { + regs->cp0_epc += 4; + return 0; + } + + return __compute_return_epc(regs); +} + +#endif /* _ASM_BRANCH_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/break.h b/target/linux/realtek/files/arch/rlx/include/asm/break.h new file mode 100644 index 000000000..44437ed76 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/break.h @@ -0,0 +1,35 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 2003 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. 
+ */ +#ifndef __ASM_BREAK_H +#define __ASM_BREAK_H + +/* + * The following break codes are or were in use for specific purposes in + * other MIPS operating systems. Linux/MIPS doesn't use all of them. The + * unused ones are here as placeholders; we might encounter them in + * non-Linux/MIPS object files or make use of them in the future. + */ +#define BRK_USERBP 0 /* User bp (used by debuggers) */ +#define BRK_KERNELBP 1 /* Break in the kernel */ +#define BRK_ABORT 2 /* Sometimes used by abort(3) to SIGIOT */ +#define BRK_BD_TAKEN 3 /* For bd slot emulation - not implemented */ +#define BRK_BD_NOTTAKEN 4 /* For bd slot emulation - not implemented */ +#define BRK_SSTEPBP 5 /* User bp (used by debuggers) */ +#define BRK_OVERFLOW 6 /* Overflow check */ +#define BRK_DIVZERO 7 /* Divide by zero check */ +#define BRK_RANGE 8 /* Range error check */ +#define BRK_STACKOVERFLOW 9 /* For Ada stackchecking */ +#define BRK_NORLD 10 /* No rld found - not used by Linux/MIPS */ +#define _BRK_THREADBP 11 /* For threads, user bp (used by debuggers) */ +#define BRK_BUG 512 /* Used by BUG() */ +#define BRK_KDB 513 /* Used in KDB_ENTER() */ +#define BRK_MEMU 514 /* Used by FPU emulator */ +#define BRK_MULOVF 1023 /* Multiply overflow */ + +#endif /* __ASM_BREAK_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/bug.h b/target/linux/realtek/files/arch/rlx/include/asm/bug.h new file mode 100644 index 000000000..6cf29c26e --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/bug.h @@ -0,0 +1,45 @@ +#ifndef __ASM_BUG_H +#define __ASM_BUG_H + +#include <linux/compiler.h> +#include <asm/sgidefs.h> + +#ifdef CONFIG_BUG + +#include <asm/break.h> + +static inline void __noreturn BUG(void) +{ + __asm__ __volatile__("break %0" : : "i" (BRK_BUG)); + /* Fool GCC into thinking the function doesn't return. */ + while (1) + ; +} + +#define HAVE_ARCH_BUG + +#if (_MIPS_ISA > _MIPS_ISA_MIPS1) + +static inline void __BUG_ON(unsigned long condition) +{ + if (__builtin_constant_p(condition)) { + if (condition) + BUG(); + else + return; + } + __asm__ __volatile__("tne $0, %0, %1" + : : "r" (condition), "i" (BRK_BUG)); +} + +#define BUG_ON(C) __BUG_ON((unsigned long)(C)) + +#define HAVE_ARCH_BUG_ON + +#endif /* _MIPS_ISA > _MIPS_ISA_MIPS1 */ + +#endif + +#include <asm-generic/bug.h> + +#endif /* __ASM_BUG_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/bugs.h b/target/linux/realtek/files/arch/rlx/include/asm/bugs.h new file mode 100644 index 000000000..e489045ca --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/bugs.h @@ -0,0 +1,27 @@ +/* + * This is included by init/main.c to check for architecture-dependent bugs. + * + * Copyright (C) 2007 Maciej W. Rozycki + * + * Needs: + * void check_bugs(void); + */ +#ifndef _ASM_BUGS_H +#define _ASM_BUGS_H + +#include <linux/bug.h> +#include <linux/delay.h> +#include <linux/smp.h> + +#include <asm/cpu.h> +#include <asm/cpu-info.h> + + +static inline void check_bugs(void) +{ + unsigned int cpu = smp_processor_id(); + + cpu_data[cpu].udelay_val = loops_per_jiffy; +} + +#endif /* _ASM_BUGS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/byteorder.h b/target/linux/realtek/files/arch/rlx/include/asm/byteorder.h new file mode 100644 index 000000000..4c3b2cfd6 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/byteorder.h @@ -0,0 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 99, 2003 by Ralf Baechle + */ +#ifndef _ASM_BYTEORDER_H +#define _ASM_BYTEORDER_H + +#ifdef CONFIG_CPU_BIG_ENDIAN +#include <linux/byteorder/big_endian.h> +#else +#include <linux/byteorder/little_endian.h> +#endif + +#endif /* _ASM_BYTEORDER_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cache.h b/target/linux/realtek/files/arch/rlx/include/asm/cache.h new file mode 100644 index 000000000..389d7bd60 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cache.h @@ -0,0 +1,20 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1997, 98, 99, 2000, 2003 Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_CACHE_H +#define _ASM_CACHE_H + +#include <kmalloc.h> + +#define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define SMP_CACHE_SHIFT L1_CACHE_SHIFT +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +#endif /* _ASM_CACHE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cachectl.h b/target/linux/realtek/files/arch/rlx/include/asm/cachectl.h new file mode 100644 index 000000000..f3ce72186 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cachectl.h @@ -0,0 +1,26 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle + */ +#ifndef _ASM_CACHECTL +#define _ASM_CACHECTL + +/* + * Options for cacheflush system call + */ +#define ICACHE (1<<0) /* flush instruction cache */ +#define DCACHE (1<<1) /* writeback and flush data cache */ +#define BCACHE (ICACHE|DCACHE) /* flush both caches */ + +/* + * Caching modes for the cachectl(2) call + * + * cachectl(2) is currently not supported and returns ENOSYS. + */ +#define CACHEABLE 0 /* make pages cacheable */ +#define UNCACHEABLE 1 /* make pages uncacheable */ + +#endif /* _ASM_CACHECTL */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cacheflush.h b/target/linux/realtek/files/arch/rlx/include/asm/cacheflush.h new file mode 100644 index 000000000..eca1bf897 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cacheflush.h @@ -0,0 +1,111 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle + * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. + */ +#ifndef _ASM_CACHEFLUSH_H +#define _ASM_CACHEFLUSH_H + +/* Keep includes the same across arches. 
*/ +#include <linux/mm.h> +#include <asm/cpu-features.h> + +/* Cache flushing: + * + * - flush_cache_all() flushes entire cache + * - flush_cache_mm(mm) flushes the specified mm context's cache lines + * - flush_cache_dup mm(mm) handles cache flushing when forking + * - flush_cache_page(mm, vmaddr, pfn) flushes a single page + * - flush_cache_range(vma, start, end) flushes a range of pages + * - flush_icache_range(start, end) flush a range of instructions + * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache + * + * MIPS specific flush operations: + * + * - flush_cache_sigtramp() flush signal trampoline + * - flush_icache_all() flush the entire instruction cache + * - flush_data_cache_page() flushes a page from the data cache + */ +extern void (*flush_cache_all)(void); +extern void (*__flush_cache_all)(void); +extern void (*flush_cache_mm)(struct mm_struct *mm); +//#define flush_cache_dup_mm(mm) do { (void) (mm); } while (0) +#define flush_cache_dup_mm(mm) flush_cache_mm(mm) +extern void (*flush_cache_range)(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); +extern void __flush_dcache_page(struct page *page); + +static inline void flush_dcache_page(struct page *page) +{ + __flush_dcache_page(page); +} + +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +#define ARCH_HAS_FLUSH_ANON_PAGE +extern void __flush_anon_page(struct page *, unsigned long); +static inline void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long vmaddr) +{ + if (PageAnon(page)) + __flush_anon_page(page, vmaddr); +} + +static inline void flush_icache_page(struct vm_area_struct *vma, + struct page *page) +{ +} + +extern void (*flush_icache_range)(unsigned long start, unsigned long end); +extern void (*local_flush_icache_range)(unsigned long start, unsigned long end); + +extern void (*__flush_cache_vmap)(void); + +static inline void flush_cache_vmap(unsigned long start, unsigned long end) +{ +} + +extern void (*__flush_cache_vunmap)(void); + +static inline void flush_cache_vunmap(unsigned long start, unsigned long end) +{ +} + +extern void copy_to_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, void *dst, const void *src, + unsigned long len); + +extern void copy_from_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, void *dst, const void *src, + unsigned long len); + +extern void (*flush_cache_sigtramp)(unsigned long addr); +extern void (*flush_icache_all)(void); +extern void (*local_flush_data_cache_page)(void * addr); +extern void (*flush_data_cache_page)(unsigned long addr); + +/* + * This flag is used to indicate that the page pointed to by a pte + * is dirty and requires cleaning before returning it to the user. + */ +#define PG_dcache_dirty PG_arch_1 + +#define Page_dcache_dirty(page) \ + test_bit(PG_dcache_dirty, &(page)->flags) +#define SetPageDcacheDirty(page) \ + set_bit(PG_dcache_dirty, &(page)->flags) +#define ClearPageDcacheDirty(page) \ + clear_bit(PG_dcache_dirty, &(page)->flags) + +/* Run kernel code uncached, useful for cache probing functions. 
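A usage sketch (not part of the original patch) for the icache maintenance hook declared above: after code bytes are written through the data cache (for example into a trampoline buffer), flush_icache_range() makes them visible to instruction fetch. The buffer and length are hypothetical.

#include <linux/string.h>
#include <asm/cacheflush.h>

static void example_install_code(void *dst, const void *insns, unsigned long len)
{
	memcpy(dst, insns, len);			/* goes through the D-cache */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + len);	/* sync with the I-cache */
}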
*/ +unsigned long run_uncached(void *func); + +extern void *kmap_coherent(struct page *page, unsigned long addr); +extern void kunmap_coherent(void); + +#endif /* _ASM_CACHEFLUSH_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cacheops.h b/target/linux/realtek/files/arch/rlx/include/asm/cacheops.h new file mode 100644 index 000000000..0b102eae0 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cacheops.h @@ -0,0 +1,81 @@ +/* + * Cache operations for the cache instruction. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * (C) Copyright 1996, 97, 99, 2002, 03 Ralf Baechle + * (C) Copyright 1999 Silicon Graphics, Inc. + */ +#ifndef __ASM_CACHEOPS_H +#define __ASM_CACHEOPS_H + +/* + * Cache Operations available on all MIPS processors with R4000-style caches + */ +#define Index_Invalidate_I 0x00 +#define Index_Writeback_Inv_D 0x01 +#define Index_Load_Tag_I 0x04 +#define Index_Load_Tag_D 0x05 +#define Index_Store_Tag_I 0x08 +#define Index_Store_Tag_D 0x09 +#define Hit_Invalidate_I 0x10 +#define Hit_Invalidate_D 0x11 +#define Hit_Writeback_Inv_D 0x15 + +/* + * R4000-specific cacheops + */ +#define Create_Dirty_Excl_D 0x0d +#define Fill 0x14 +#define Hit_Writeback_I 0x18 +#define Hit_Writeback_D 0x19 + +/* + * R4000SC and R4400SC-specific cacheops + */ +#define Index_Invalidate_SI 0x02 +#define Index_Writeback_Inv_SD 0x03 +#define Index_Load_Tag_SI 0x06 +#define Index_Load_Tag_SD 0x07 +#define Index_Store_Tag_SI 0x0A +#define Index_Store_Tag_SD 0x0B +#define Create_Dirty_Excl_SD 0x0f +#define Hit_Invalidate_SI 0x12 +#define Hit_Invalidate_SD 0x13 +#define Hit_Writeback_Inv_SD 0x17 +#define Hit_Writeback_SD 0x1b +#define Hit_Set_Virtual_SI 0x1e +#define Hit_Set_Virtual_SD 0x1f + +/* + * R5000-specific cacheops + */ +#define R5K_Page_Invalidate_S 0x17 + +/* + * RM7000-specific cacheops + */ +#define Page_Invalidate_T 0x16 + +/* + * R10000-specific cacheops + * + * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused. + * Most of the _S cacheops are identical to the R4000SC _SD cacheops. + */ +#define Index_Writeback_Inv_S 0x03 +#define Index_Load_Tag_S 0x07 +#define Index_Store_Tag_S 0x0B +#define Hit_Invalidate_S 0x13 +#define Cache_Barrier 0x14 +#define Hit_Writeback_Inv_S 0x17 +#define Index_Load_Data_I 0x18 +#define Index_Load_Data_D 0x19 +#define Index_Load_Data_S 0x1b +#define Index_Store_Data_I 0x1c +#define Index_Store_Data_D 0x1d +#define Index_Store_Data_S 0x1f + +#endif /* __ASM_CACHEOPS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/checksum.h b/target/linux/realtek/files/arch/rlx/include/asm/checksum.h new file mode 100644 index 000000000..04bcbb534 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/checksum.h @@ -0,0 +1,255 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2001 Thiemo Seufer. + * Copyright (C) 2002 Maciej W. 
Rozycki + */ +#ifndef _ASM_CHECKSUM_H +#define _ASM_CHECKSUM_H + +#include <linux/in6.h> + +#include <asm/uaccess.h> + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum); + +__wsum __csum_partial_copy_user(const void *src, void *dst, + int len, __wsum sum, int *err_ptr); + +/* + * this is a new version of the above that records errors it finds in *errp, + * but continues and zeros the rest of the buffer. + */ + #if 1 +static inline +__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, + __wsum sum, int *err_ptr) +{ + might_fault(); + return __csum_partial_copy_user((__force void *)src, dst, + len, sum, err_ptr); +} + #else + unsigned int csum_partial_copy_from_user (const unsigned char __user *src, + unsigned char *dst, int len, unsigned int sum, int *err_ptr); + #endif + +/* + * Copy and checksum to user + */ +#define HAVE_CSUM_COPY_USER +static inline +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, + __wsum sum, int *err_ptr) +{ + might_fault(); + if (access_ok(VERIFY_WRITE, dst, len)) + return __csum_partial_copy_user(src, (__force void *)dst, + len, sum, err_ptr); + if (len) + *err_ptr = -EFAULT; + + return (__force __wsum)-1; /* invalid checksum */ +} + +/* + * the same as csum_partial, but copies from user space (but on MIPS + * we have just one address space, so this is identical to the above) + */ +__wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum); + +/* + * Fold a partial checksum without adding pseudo headers + */ +static inline __sum16 csum_fold(__wsum sum) +{ + __asm__( + " .set push # csum_fold\n" + " .set noat \n" + " sll $1, %0, 16 \n" + " addu %0, $1 \n" + " sltu $1, %0, $1 \n" + " srl %0, %0, 16 \n" + " addu %0, $1 \n" + " xori %0, 0xffff \n" + " .set pop" + : "=r" (sum) + : "0" (sum)); + + return (__force __sum16)sum; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + * + * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by + * Arnt Gulbrandsen. 
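 *
 * Note: ihl is the IPv4 header length in 32-bit words and must be at least
 * 5 (a minimal header); the code below sums the first four words unrolled
 * and then loops over the remaining words, so it relies on that lower bound.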
+ */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + const unsigned int *word = iph; + const unsigned int *stop = word + ihl; + unsigned int csum; + int carry; + + csum = word[0]; + csum += word[1]; + carry = (csum < word[1]); + csum += carry; + + csum += word[2]; + carry = (csum < word[2]); + csum += carry; + + csum += word[3]; + carry = (csum < word[3]); + csum += carry; + + word += 4; + do { + csum += *word; + carry = (csum < *word); + csum += carry; + word++; + } while (word != stop); + + return csum_fold(csum); +} + +static inline __wsum csum_tcpudp_nofold(__be32 saddr, + __be32 daddr, unsigned short len, unsigned short proto, + __wsum sum) +{ + __asm__( + " .set push # csum_tcpudp_nofold\n" + " .set noat \n" + " addu %0, %2 \n" + " sltu $1, %0, %2 \n" + " addu %0, $1 \n" + + " addu %0, %3 \n" + " sltu $1, %0, %3 \n" + " addu %0, $1 \n" + + " addu %0, %4 \n" + " sltu $1, %0, %4 \n" + " addu %0, $1 \n" + " .set pop" + : "=r" (sum) + : "0" ((__force unsigned long)daddr), + "r" ((__force unsigned long)saddr), +#ifdef __MIPSEL__ + "r" ((proto + len) << 8), +#else + "r" (proto + len), +#endif + "r" ((__force unsigned long)sum)); + + return sum; +} + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +static inline __sum16 ip_compute_csum(const void *buff, int len) +{ + return csum_fold(csum_partial(buff, len, 0)); +} + +#define _HAVE_ARCH_IPV6_CSUM +static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) +{ + __asm__( + " .set push # csum_ipv6_magic\n" + " .set noreorder \n" + " .set noat \n" + " addu %0, %5 # proto (long in network byte order)\n" + " sltu $1, %0, %5 \n" + " addu %0, $1 \n" + + " addu %0, %6 # csum\n" + " sltu $1, %0, %6 \n" + " lw %1, 0(%2) # four words source address\n" + " addu %0, $1 \n" + " addu %0, %1 \n" + " sltu $1, %0, %1 \n" + + " lw %1, 4(%2) \n" + " addu %0, $1 \n" + " addu %0, %1 \n" + " sltu $1, %0, %1 \n" + + " lw %1, 8(%2) \n" + " addu %0, $1 \n" + " addu %0, %1 \n" + " sltu $1, %0, %1 \n" + + " lw %1, 12(%2) \n" + " addu %0, $1 \n" + " addu %0, %1 \n" + " sltu $1, %0, %1 \n" + + " lw %1, 0(%3) \n" + " addu %0, $1 \n" + " addu %0, %1 \n" + " sltu $1, %0, %1 \n" + + " lw %1, 4(%3) \n" + " addu %0, $1 \n" + " addu %0, %1 \n" + " sltu $1, %0, %1 \n" + + " lw %1, 8(%3) \n" + " addu %0, $1 \n" + " addu %0, %1 \n" + " sltu $1, %0, %1 \n" + + " lw %1, 12(%3) \n" + " addu %0, $1 \n" + " addu %0, %1 \n" + " sltu $1, %0, %1 \n" + + " addu %0, $1 # Add final carry\n" + " .set pop" + : "=r" (sum), "=r" (proto) + : "r" (saddr), "r" (daddr), + "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); + + return csum_fold(sum); +} + +#endif /* _ASM_CHECKSUM_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cmp.h b/target/linux/realtek/files/arch/rlx/include/asm/cmp.h new file mode 100644 index 000000000..89a73fb93 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cmp.h @@ -0,0 +1,18 @@ +#ifndef _ASM_CMP_H +#define _ASM_CMP_H + +/* + * Definitions for CMP multitasking on MIPS cores + */ +struct task_struct; + +extern void cmp_smp_setup(void); +extern void 
cmp_smp_finish(void); +extern void cmp_boot_secondary(int cpu, struct task_struct *t); +extern void cmp_init_secondary(void); +extern void cmp_cpus_done(void); +extern void cmp_prepare_cpus(unsigned int max_cpus); + +/* This is platform specific */ +extern void cmp_send_ipi(int cpu, unsigned int action); +#endif /* _ASM_CMP_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cmpxchg.h b/target/linux/realtek/files/arch/rlx/include/asm/cmpxchg.h new file mode 100644 index 000000000..dbf73e889 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cmpxchg.h @@ -0,0 +1,116 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef __ASM_CMPXCHG_H +#define __ASM_CMPXCHG_H + +#include <linux/irqflags.h> + +#define __HAVE_ARCH_CMPXCHG 1 + +#ifdef CONFIG_CPU_HAS_LLSC +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) +#define __cmpxchg_asm(ld, st, m, old, new) \ +({ \ + __typeof(*(m)) __ret; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .set mips3 \n" \ + "1: " ld " %0, %2 # __cmpxchg_asm \n" \ + " nop \n" \ + " bne %0, %z3, 2f \n" \ + " .set mips0 \n" \ + " move $1, %z4 \n" \ + " .set mips3 \n" \ + " " st " $1, %1 \n" \ + " beqz $1, 3f \n" \ + "2: \n" \ + " .subsection 2 \n" \ + "3: b 1b \n" \ + " .previous \n" \ + " .set pop \n" \ + : "=&r" (__ret), "=R" (*m) \ + : "R" (*m), "Jr" (old), "Jr" (new) \ + : "memory"); \ + __ret; \ +}) +#else +#define __cmpxchg_asm(ld, st, m, old, new) \ +({ \ + __typeof(*(m)) __ret; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .set mips3 \n" \ + "1: " ld " %0, %2 # __cmpxchg_asm \n" \ + " bne %0, %z3, 2f \n" \ + " .set mips0 \n" \ + " move $1, %z4 \n" \ + " .set mips3 \n" \ + " " st " $1, %1 \n" \ + " beqz $1, 3f \n" \ + "2: \n" \ + " .subsection 2 \n" \ + "3: b 1b \n" \ + " .previous \n" \ + " .set pop \n" \ + : "=&r" (__ret), "=R" (*m) \ + : "R" (*m), "Jr" (old), "Jr" (new) \ + : "memory"); \ + __ret; \ +}) +#endif +#else +#define __cmpxchg_asm(ld, st, m, old, new) \ +({ \ + __typeof(*(m)) __ret; \ + unsigned long __flags; \ + \ + raw_local_irq_save(__flags); \ + __ret = *m; \ + if (__ret == old) \ + *m = new; \ + raw_local_irq_restore(__flags); \ + \ + __ret; \ +}) +#endif + +#define __cmpxchg(ptr, old, new, barrier) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ + __typeof__(*(ptr)) __res = 0; \ + \ + barrier; \ + \ + __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \ + \ + barrier; \ + \ + __res; \ +}) + +#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_llsc_mb()) +#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new, ) + +#if 0 +#define cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg((ptr), (o), (n)); \ + }) + +#include <asm-generic/cmpxchg-local.h> +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#endif + +#endif /* __ASM_CMPXCHG_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/compat-signal.h b/target/linux/realtek/files/arch/rlx/include/asm/compat-signal.h new file mode 100644 index 000000000..eae21e361 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/compat-signal.h @@ -0,0 +1,121 @@ +#ifndef __ASM_COMPAT_SIGNAL_H +#define 
__ASM_COMPAT_SIGNAL_H + +#include <linux/bug.h> +#include <linux/compat.h> +#include <linux/compiler.h> + +#include <asm/signal.h> +#include <asm/siginfo.h> + +#include <asm/uaccess.h> + +#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3) + +typedef struct compat_siginfo { + int si_signo; + int si_code; + int si_errno; + + union { + int _pad[SI_PAD_SIZE32]; + + /* kill() */ + struct { + compat_pid_t _pid; /* sender's pid */ + compat_uid_t _uid; /* sender's uid */ + } _kill; + + /* SIGCHLD */ + struct { + compat_pid_t _pid; /* which child */ + compat_uid_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + +#if 0 + /* IRIX SIGCHLD */ + struct { + compat_pid_t _pid; /* which child */ + compat_clock_t _utime; + int _status; /* exit code */ + compat_clock_t _stime; + } _irix_sigchld; +#endif + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ + struct { + s32 _addr; /* faulting insn/memory ref. */ + } _sigfault; + + /* SIGPOLL, SIGXFSZ (To do ...) */ + struct { + int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + + /* POSIX.1b timers */ + struct { + timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + compat_sigval_t _sigval;/* same as below */ + int _sys_private; /* not to be passed to user */ + } _timer; + + /* POSIX.1b signals */ + struct { + compat_pid_t _pid; /* sender's pid */ + compat_uid_t _uid; /* sender's uid */ + compat_sigval_t _sigval; + } _rt; + + } _sifields; +} compat_siginfo_t; + +static inline int __copy_conv_sigset_to_user(compat_sigset_t __user *d, + const sigset_t *s) +{ + int err; + + BUG_ON(sizeof(*d) != sizeof(*s)); + BUG_ON(_NSIG_WORDS != 2); + + err = __put_user(s->sig[0], &d->sig[0]); + err |= __put_user(s->sig[0] >> 32, &d->sig[1]); + err |= __put_user(s->sig[1], &d->sig[2]); + err |= __put_user(s->sig[1] >> 32, &d->sig[3]); + + return err; +} + +static inline int __copy_conv_sigset_from_user(sigset_t *d, + const compat_sigset_t __user *s) +{ + int err; + union sigset_u { + sigset_t s; + compat_sigset_t c; + } *u = (union sigset_u *) d; + + BUG_ON(sizeof(*d) != sizeof(*s)); + BUG_ON(_NSIG_WORDS != 2); + +#ifdef CONFIG_CPU_BIG_ENDIAN + err = __get_user(u->c.sig[1], &s->sig[0]); + err |= __get_user(u->c.sig[0], &s->sig[1]); + err |= __get_user(u->c.sig[3], &s->sig[2]); + err |= __get_user(u->c.sig[2], &s->sig[3]); +#endif +#ifdef CONFIG_CPU_LITTLE_ENDIAN + err = __get_user(u->c.sig[0], &s->sig[0]); + err |= __get_user(u->c.sig[1], &s->sig[1]); + err |= __get_user(u->c.sig[2], &s->sig[2]); + err |= __get_user(u->c.sig[3], &s->sig[3]); +#endif + + return err; +} + +#endif /* __ASM_COMPAT_SIGNAL_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/compat.h b/target/linux/realtek/files/arch/rlx/include/asm/compat.h new file mode 100644 index 000000000..f58aed354 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/compat.h @@ -0,0 +1,227 @@ +#ifndef _ASM_COMPAT_H +#define _ASM_COMPAT_H +/* + * Architecture specific compatibility types + */ +#include <linux/thread_info.h> +#include <linux/types.h> +#include <asm/page.h> +#include <asm/ptrace.h> + +#define COMPAT_USER_HZ 100 + +typedef u32 compat_size_t; +typedef s32 compat_ssize_t; +typedef s32 compat_time_t; +typedef s32 compat_clock_t; +typedef s32 compat_suseconds_t; + +typedef s32 compat_pid_t; +typedef s32 __compat_uid_t; +typedef s32 __compat_gid_t; +typedef __compat_uid_t __compat_uid32_t; +typedef __compat_gid_t __compat_gid32_t; +typedef u32 compat_mode_t; +typedef u32 compat_ino_t; 
+typedef u32 compat_dev_t; +typedef s32 compat_off_t; +typedef s64 compat_loff_t; +typedef u32 compat_nlink_t; +typedef s32 compat_ipc_pid_t; +typedef s32 compat_daddr_t; +typedef s32 compat_caddr_t; +typedef struct { + s32 val[2]; +} compat_fsid_t; +typedef s32 compat_timer_t; +typedef s32 compat_key_t; + +typedef s32 compat_int_t; +typedef s32 compat_long_t; +typedef s64 compat_s64; +typedef u32 compat_uint_t; +typedef u32 compat_ulong_t; +typedef u64 compat_u64; + +struct compat_timespec { + compat_time_t tv_sec; + s32 tv_nsec; +}; + +struct compat_timeval { + compat_time_t tv_sec; + s32 tv_usec; +}; + +struct compat_stat { + compat_dev_t st_dev; + s32 st_pad1[3]; + compat_ino_t st_ino; + compat_mode_t st_mode; + compat_nlink_t st_nlink; + __compat_uid_t st_uid; + __compat_gid_t st_gid; + compat_dev_t st_rdev; + s32 st_pad2[2]; + compat_off_t st_size; + s32 st_pad3; + compat_time_t st_atime; + s32 st_atime_nsec; + compat_time_t st_mtime; + s32 st_mtime_nsec; + compat_time_t st_ctime; + s32 st_ctime_nsec; + s32 st_blksize; + s32 st_blocks; + s32 st_pad4[14]; +}; + +struct compat_flock { + short l_type; + short l_whence; + compat_off_t l_start; + compat_off_t l_len; + s32 l_sysid; + compat_pid_t l_pid; + short __unused; + s32 pad[4]; +}; + +#define F_GETLK64 33 +#define F_SETLK64 34 +#define F_SETLKW64 35 + +struct compat_flock64 { + short l_type; + short l_whence; + compat_loff_t l_start; + compat_loff_t l_len; + compat_pid_t l_pid; +}; + +struct compat_statfs { + int f_type; + int f_bsize; + int f_frsize; + int f_blocks; + int f_bfree; + int f_files; + int f_ffree; + int f_bavail; + compat_fsid_t f_fsid; + int f_namelen; + int f_spare[6]; +}; + +#define COMPAT_RLIM_INFINITY 0x7fffffffUL + +typedef u32 compat_old_sigset_t; /* at least 32 bits */ + +#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */ +#define _COMPAT_NSIG_BPW 32 + +typedef u32 compat_sigset_word; + +#define COMPAT_OFF_T_MAX 0x7fffffff +#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL + +/* + * A pointer passed in from user mode. This should not + * be used for syscall parameters, just declare them + * as pointers because the syscall entry code will have + * appropriately converted them already. 
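 *
 * Where a 32-bit pointer does arrive as plain data (for example inside an
 * ioctl payload), convert it with compat_ptr() before dereferencing it.
 * An illustrative sketch, with "arg32" and "req" as placeholders:
 *
 *	void __user *p = compat_ptr(arg32);
 *	if (copy_from_user(&req, p, sizeof(req)))
 *		return -EFAULT;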
+ */ +typedef u32 compat_uptr_t; + +static inline void __user *compat_ptr(compat_uptr_t uptr) +{ + /* cast to a __user pointer via "unsigned long" makes sparse happy */ + return (void __user *)(unsigned long)(long)uptr; +} + +static inline compat_uptr_t ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + +static inline void __user *compat_alloc_user_space(long len) +{ + struct pt_regs *regs = (struct pt_regs *) + ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; + + return (void __user *) (regs->regs[29] - len); +} + +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + compat_mode_t mode; + unsigned short seq; + unsigned short __pad2; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_time_t sem_otime; + compat_time_t sem_ctime; + compat_ulong_t sem_nsems; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; +#ifndef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused1; +#endif + compat_time_t msg_stime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused1; +#endif +#ifndef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused2; +#endif + compat_time_t msg_rtime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused2; +#endif +#ifndef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused3; +#endif + compat_time_t msg_ctime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused3; +#endif + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_size_t shm_segsz; + compat_time_t shm_atime; + compat_time_t shm_dtime; + compat_time_t shm_ctime; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + +static inline int is_compat_task(void) +{ + return test_thread_flag(TIF_32BIT); +} + +#endif /* _ASM_COMPAT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/compiler.h b/target/linux/realtek/files/arch/rlx/include/asm/compiler.h new file mode 100644 index 000000000..71f5c5cfc --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/compiler.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2004, 2007 Maciej W. Rozycki + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_COMPILER_H +#define _ASM_COMPILER_H + +#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) +#define GCC_IMM_ASM() "n" +#define GCC_REG_ACCUM "$0" +#else +#define GCC_IMM_ASM() "rn" +#define GCC_REG_ACCUM "accum" +#endif + +#endif /* _ASM_COMPILER_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cpu-features.h b/target/linux/realtek/files/arch/rlx/include/asm/cpu-features.h new file mode 100644 index 000000000..412ef6b9a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cpu-features.h @@ -0,0 +1,147 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + */ +#ifndef __ASM_CPU_FEATURES_H +#define __ASM_CPU_FEATURES_H + +#include <asm/cpu.h> +#include <asm/cpu-info.h> +#include <bspcpu.h> + +#ifndef current_cpu_type +#define current_cpu_type() current_cpu_data.cputype +#endif + +#define cpu_has_tlb 1 +#define cpu_has_3k_cache 1 +#define cpu_has_mips16 1 + +#ifdef CONFIG_CPU_HAS_EJTAG +#define cpu_has_ejtag 1 +#else +#define cpu_has_ejtag 0 +#endif + +#ifdef CONFIG_CPU_HAS_LLSC +#define cpu_has_llsc 1 +#else +#define cpu_has_llsc 0 +#endif + +#ifndef cpu_dcache_line_size +#define cpu_dcache_line_size() cpu_dcache_line +#endif +#ifndef cpu_icache_line_size +#define cpu_icache_line_size() cpu_icache_line +#endif +#ifndef cpu_scache_line_size +#define cpu_scache_line_size() cpu_scache_line +#endif + +#ifndef cpu_scache_line +#define cpu_scache_line 0 +#endif + +#ifndef cpu_dcache_line +#define cpu_dcache_line 0 +#endif + +#ifndef cpu_icache_line +#define cpu_icache_line 0 +#endif + +#ifndef cpu_scache_size +#define cpu_scache_size 0 +#endif + +#ifndef cpu_dcache_size +#define cpu_dcache_size 0 +#endif + +#ifndef cpu_icache_size +#define cpu_icache_size 0 +#endif + +#ifndef cpu_tlb_entry +#define cpu_tlb_entry 0 +#endif + +/* tonywu: below does not matter, should set to zero except icache snoop */ +#define cpu_has_4kex 0 +#define cpu_has_4k_cache 0 +#define cpu_has_6k_cache 0 +#define cpu_has_8k_cache 0 +#define cpu_has_tx39_cache 0 +#define cpu_has_octeon_cache 0 +#define cpu_has_fpu 0 +#define raw_cpu_has_fpu 0 +#define cpu_has_32fpr 0 +#define cpu_has_counter 0 +#define cpu_has_watch 0 +#define cpu_has_divec 0 +#define cpu_has_vce 0 +#define cpu_has_cache_cdex_p 0 +#define cpu_has_cache_cdex_s 0 +#define cpu_has_prefetch 0 +#define cpu_has_mcheck 0 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 +#define cpu_has_vtag_icache 0 +#define cpu_has_dc_aliases 0 +#define cpu_has_ic_fills_f_dc 0 +#define cpu_has_pindexed_dcache 0 + +/* + * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors + * such as the R10000 have I-Caches that snoop local stores; the embedded ones + * don't. For maintaining I-cache coherency this means we need to flush the + * D-cache all the way back to whever the I-cache does refills from, so the + * I-cache has a chance to see the new data at all. Then we have to flush the + * I-cache also. + * Note we may have been rescheduled and may no longer be running on the CPU + * that did the store so we can't optimize this into only doing the flush on + * the local CPU. + */ +#define cpu_icache_snoops_remote_store 1 +#define cpu_has_mips32r1 0 +#define cpu_has_mips32r2 0 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +/* + * Shortcuts ... + */ +#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) +#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) +#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) +#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) +#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ + cpu_has_mips64r1 | cpu_has_mips64r2) + +/* + * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other + * pre-MIPS32/MIPS53 processors have CLO, CLZ. For 64-bit kernels + * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ. 
+ */ +#define cpu_has_clo_clz 0 +#define cpu_has_dsp 0 +#define cpu_has_mipsmt 0 +#define cpu_has_userlocal 0 + +#define cpu_has_nofpuex 0 +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 + +#define cpu_has_vint 0 +#define cpu_has_veic 0 +#define cpu_has_inclusive_pcaches 0 + +#endif /* __ASM_CPU_FEATURES_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cpu-info.h b/target/linux/realtek/files/arch/rlx/include/asm/cpu-info.h new file mode 100644 index 000000000..6a99a308c --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cpu-info.h @@ -0,0 +1,74 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 Waldorf GMBH + * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle + * Copyright (C) 1996 Paul M. Antoine + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2004 Maciej W. Rozycki + */ +#ifndef __ASM_CPU_INFO_H +#define __ASM_CPU_INFO_H + +#include <linux/types.h> + +#include <asm/cache.h> + +/* + * Descriptor for a cache + */ +struct cache_desc { + unsigned int waysize; /* Bytes per way */ + unsigned short sets; /* Number of lines per set */ + unsigned char ways; /* Number of ways */ + unsigned char linesz; /* Size of line in bytes */ + unsigned char waybit; /* Bits to select in a cache set */ + unsigned char flags; /* Flags describing cache properties */ +}; + +/* + * Flag definitions + */ +#define MIPS_CACHE_NOT_PRESENT 0x00000001 +#define MIPS_CACHE_VTAG 0x00000002 /* Virtually tagged cache */ +#define MIPS_CACHE_ALIASES 0x00000004 /* Cache could have aliases */ +#define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */ +#define MIPS_IC_SNOOPS_REMOTE 0x00000010 /* Ic snoops remote stores */ +#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */ + +struct cpuinfo_mips { + unsigned int udelay_val; + unsigned int asid_cache; + + /* + * Capability and feature descriptor structure for MIPS CPU + */ + unsigned long options; + unsigned long ases; + unsigned int processor_id; + unsigned int fpu_id; + unsigned int cputype; + int isa_level; + int tlbsize; + struct cache_desc icache; /* Primary I-cache */ + struct cache_desc dcache; /* Primary D or combined I/D cache */ + struct cache_desc scache; /* Secondary cache */ + struct cache_desc tcache; /* Tertiary/split secondary cache */ + int srsets; /* Shadow register sets */ + int core; /* physical core number */ + void *data; /* Additional data */ +} __attribute__((aligned(L1_CACHE_BYTES))); + +extern struct cpuinfo_mips cpu_data[]; +#define current_cpu_data cpu_data[smp_processor_id()] +#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] + +extern void cpu_probe(void); +extern void cpu_report(void); + +extern const char *__cpu_name[]; +#define cpu_name_string() __cpu_name[smp_processor_id()] + +#endif /* __ASM_CPU_INFO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cpu.h b/target/linux/realtek/files/arch/rlx/include/asm/cpu.h new file mode 100644 index 000000000..665a3256a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cpu.h @@ -0,0 +1,124 @@ +/* + * cpu.h: Values of the PRId register used to match up + * various MIPS cpu types. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + * Copyright (C) 2004 Maciej W. 
Rozycki + */ +#ifndef _ASM_CPU_H +#define _ASM_CPU_H + +/* Assigned Company values for bits 23:16 of the PRId Register + (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from + MTI, the PRId register is defined in this (backwards compatible) + way: + + +----------------+----------------+----------------+----------------+ + | Company Options| Company ID | Processor ID | Revision | + +----------------+----------------+----------------+----------------+ + 31 24 23 16 15 8 7 + + I don't have docs for all the previous processors, but my impression is + that bits 16-23 have been 0 for all MIPS processors before the MIPS32/64 + spec. +*/ + +#define PRID_IMP_RLX4180 0xc100 +#define PRID_IMP_RLX4181 0xcd00 +#define PRID_IMP_RLX5181 0xcf00 +#define PRID_IMP_RLX5280 0xc600 +#define PRID_IMP_RLX5281 0xdc01 +#define PRID_IMP_RLX4281 0xdc02 + +#define PRID_IMP_UNKNOWN 0xff00 + +/* + * FPU implementation/revision register (CP1 control register 0). + * + * +---------------------------------+----------------+----------------+ + * | 0 | Implementation | Revision | + * +---------------------------------+----------------+----------------+ + * 31 16 15 8 7 0 + */ + +#define FPIR_IMP_NONE 0x0000 + +#define CPU_UNKNOWN 0 +#define CPU_RLX4180 1 +#define CPU_RLX4181 2 +#define CPU_RLX5181 3 +#define CPU_RLX5280 4 +#define CPU_RLX5281 5 +#define CPU_RLX4281 6 +#define CPU_LAST 6 + +#if 0 +enum cpu_type_enum { + CPU_UNKNOWN, + CPU_RLX4180, + CPU_RLX4181, + CPU_RLX5181, + CPU_RLX5280, + CPU_RLX5281, + CPU_RLX4281, + CPU_LAST +}; +#endif + +/* + * ISA Level encodings + * + */ +#define MIPS_CPU_ISA_I 0x00000001 +#define MIPS_CPU_ISA_II 0x00000002 +#define MIPS_CPU_ISA_III 0x00000004 +#define MIPS_CPU_ISA_IV 0x00000008 +#define MIPS_CPU_ISA_V 0x00000010 +#define MIPS_CPU_ISA_M32R1 0x00000020 +#define MIPS_CPU_ISA_M32R2 0x00000040 +#define MIPS_CPU_ISA_M64R1 0x00000080 +#define MIPS_CPU_ISA_M64R2 0x00000100 + +#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \ + MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 ) +#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ + MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) + +/* + * CPU Option encodings + */ +#define MIPS_CPU_TLB 0x00000001 /* CPU has TLB */ +#define MIPS_CPU_4KEX 0x00000002 /* "R4K" exception model */ +#define MIPS_CPU_3K_CACHE 0x00000004 /* R3000-style caches */ +#define MIPS_CPU_4K_CACHE 0x00000008 /* R4000-style caches */ +#define MIPS_CPU_TX39_CACHE 0x00000010 /* TX3900-style caches */ +#define MIPS_CPU_FPU 0x00000020 /* CPU has FPU */ +#define MIPS_CPU_32FPR 0x00000040 /* 32 dbl. prec. FP registers */ +#define MIPS_CPU_COUNTER 0x00000080 /* Cycle count/compare */ +#define MIPS_CPU_WATCH 0x00000100 /* watchpoint registers */ +#define MIPS_CPU_DIVEC 0x00000200 /* dedicated interrupt vector */ +#define MIPS_CPU_VCE 0x00000400 /* virt. coherence conflict possible */ +#define MIPS_CPU_CACHE_CDEX_P 0x00000800 /* Create_Dirty_Exclusive CACHE op */ +#define MIPS_CPU_CACHE_CDEX_S 0x00001000 /* ... same for seconary cache ... 
*/ +#define MIPS_CPU_MCHECK 0x00002000 /* Machine check exception */ +#define MIPS_CPU_EJTAG 0x00004000 /* EJTAG exception */ +#define MIPS_CPU_NOFPUEX 0x00008000 /* no FPU exception */ +#define MIPS_CPU_LLSC 0x00010000 /* CPU has ll/sc instructions */ +#define MIPS_CPU_INCLUSIVE_CACHES 0x00020000 /* P-cache subset enforced */ +#define MIPS_CPU_PREFETCH 0x00040000 /* CPU has usable prefetch */ +#define MIPS_CPU_VINT 0x00080000 /* CPU supports MIPSR2 vectored interrupts */ +#define MIPS_CPU_VEIC 0x00100000 /* CPU supports MIPSR2 external interrupt controller mode */ +#define MIPS_CPU_ULRI 0x00200000 /* CPU has ULRI feature */ + +/* + * CPU ASE encodings + */ +#define MIPS_ASE_MIPS16 0x00000001 /* code compression */ +#define MIPS_ASE_MDMX 0x00000002 /* MIPS digital media extension */ +#define MIPS_ASE_MIPS3D 0x00000004 /* MIPS-3D */ +#define MIPS_ASE_SMARTMIPS 0x00000008 /* SmartMIPS */ +#define MIPS_ASE_DSP 0x00000010 /* Signal Processing ASE */ +#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */ + + +#endif /* _ASM_CPU_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/cputime.h b/target/linux/realtek/files/arch/rlx/include/asm/cputime.h new file mode 100644 index 000000000..c00eacbdd --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/cputime.h @@ -0,0 +1,6 @@ +#ifndef __MIPS_CPUTIME_H +#define __MIPS_CPUTIME_H + +#include <asm-generic/cputime.h> + +#endif /* __MIPS_CPUTIME_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/current.h b/target/linux/realtek/files/arch/rlx/include/asm/current.h new file mode 100644 index 000000000..559db66b9 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/current.h @@ -0,0 +1,23 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998, 2002 Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_CURRENT_H +#define _ASM_CURRENT_H + +#include <linux/thread_info.h> + +struct task_struct; + +static inline struct task_struct * get_current(void) +{ + return current_thread_info()->task; +} + +#define current get_current() + +#endif /* _ASM_CURRENT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/debug.h b/target/linux/realtek/files/arch/rlx/include/asm/debug.h new file mode 100644 index 000000000..1fd5a2b39 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/debug.h @@ -0,0 +1,48 @@ +/* + * Debug macros for run-time debugging. + * Turned on/off with CONFIG_RUNTIME_DEBUG option. + * + * Copyright (C) 2001 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef _ASM_DEBUG_H +#define _ASM_DEBUG_H + + +/* + * run-time macros for catching spurious errors. Eable CONFIG_RUNTIME_DEBUG in + * kernel hacking config menu to use them. + * + * Use them as run-time debugging aid. NEVER USE THEM AS ERROR HANDLING CODE!!! 
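 *
 * For instance (sketch; "p" is a placeholder), db_assert(p != NULL) panics
 * with file and line information when CONFIG_RUNTIME_DEBUG is set and
 * expands to nothing otherwise, while db_verify(x, y) still evaluates the
 * expression x when debugging is off, so side effects are not lost.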
+ */ + +#ifdef CONFIG_RUNTIME_DEBUG + +#include <linux/kernel.h> + +#define db_assert(x) if (!(x)) { \ + panic("assertion failed at %s:%d: %s", __FILE__, __LINE__, #x); } +#define db_warn(x) if (!(x)) { \ + printk(KERN_WARNING "warning at %s:%d: %s", __FILE__, __LINE__, #x); } +#define db_verify(x, y) db_assert(x y) +#define db_verify_warn(x, y) db_warn(x y) +#define db_run(x) do { x; } while (0) + +#else + +#define db_assert(x) +#define db_warn(x) +#define db_verify(x, y) x +#define db_verify_warn(x, y) x +#define db_run(x) + +#endif + +#endif /* _ASM_DEBUG_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/delay.h b/target/linux/realtek/files/arch/rlx/include/asm/delay.h new file mode 100644 index 000000000..e7cd78277 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/delay.h @@ -0,0 +1,32 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 by Waldorf Electronics + * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki + */ +#ifndef _ASM_DELAY_H +#define _ASM_DELAY_H + +#include <linux/param.h> + +extern void __delay(unsigned int loops); +extern void __ndelay(unsigned int ns); +extern void __udelay(unsigned int us); + +#define ndelay(ns) __ndelay(ns) +#define udelay(us) __udelay(us) + +/* make sure "usecs *= ..." in udelay do not overflow. */ +#if HZ >= 1000 +#define MAX_UDELAY_MS 1 +#elif HZ <= 200 +#define MAX_UDELAY_MS 5 +#else +#define MAX_UDELAY_MS (1000 / HZ) +#endif + +#endif /* _ASM_DELAY_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/device.h b/target/linux/realtek/files/arch/rlx/include/asm/device.h new file mode 100644 index 000000000..d8f9872b0 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/device.h @@ -0,0 +1,7 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#include <asm-generic/device.h> + diff --git a/target/linux/realtek/files/arch/rlx/include/asm/div64.h b/target/linux/realtek/files/arch/rlx/include/asm/div64.h new file mode 100644 index 000000000..5134267fd --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/div64.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2000, 2004 Maciej W. Rozycki + * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org) + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#ifndef __ASM_DIV64_H +#define __ASM_DIV64_H + +#include <asm-generic/div64.h> + +#endif /* __ASM_DIV64_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/dma-mapping.h b/target/linux/realtek/files/arch/rlx/include/asm/dma-mapping.h new file mode 100644 index 000000000..d16afddb0 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/dma-mapping.h @@ -0,0 +1,86 @@ +#ifndef _ASM_DMA_MAPPING_H +#define _ASM_DMA_MAPPING_H + +#include <asm/scatterlist.h> +#include <asm/cache.h> + +void *dma_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + +void dma_free_noncoherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + +void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + +extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction direction); +extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction); +extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction); +extern dma_addr_t dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction direction); + +static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, + size_t size, enum dma_data_direction direction) +{ + dma_unmap_single(dev, dma_address, size, direction); +} + +extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nhwentries, enum dma_data_direction direction); +extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction direction); +extern void dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction direction); +extern void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction); +extern void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction); +extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction direction); +extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction direction); +extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); +extern int dma_supported(struct device *dev, u64 mask); + +static inline int +dma_set_mask(struct device *dev, u64 mask) +{ + if(!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + + *dev->dma_mask = mask; + + return 0; +} + +static inline int +dma_get_cache_alignment(void) +{ + /* XXX Largest on any MIPS */ + return 128; +} + +extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr); + +extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); + +#if 0 +#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY + +extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, + dma_addr_t device_addr, size_t size, int flags); +extern void dma_release_declared_memory(struct device *dev); +extern void * dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size); +#endif + 
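/*
 * Illustrative sketch of how the streaming interface declared above is
 * normally used on a cache-incoherent core (assuming it follows the usual
 * MIPS semantics); "dev", "buf" and "len" are placeholders.
 */
#if 0	/* example only */
static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Write back the CPU's dirty cache lines covering buf. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the device with "handle" and wait for completion ... */

	/* The CPU must not touch buf again until it is unmapped or synced. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif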
+#endif /* _ASM_DMA_MAPPING_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/dma.h b/target/linux/realtek/files/arch/rlx/include/asm/dma.h new file mode 100644 index 000000000..004059461 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/dma.h @@ -0,0 +1,310 @@ +/* + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + * + * NOTE: all this is true *only* for ISA/EISA expansions on Mips boards + * and can only be used for expansion cards. Onboard DMA controllers, such + * as the R4030 on Jazz boards behave totally different! + */ + +#ifndef _ASM_DMA_H +#define _ASM_DMA_H + +#include <asm/io.h> /* need byte IO */ +#include <linux/spinlock.h> /* And spinlocks */ +#include <linux/delay.h> +#include <asm/system.h> + + +#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER +#define dma_outb outb_p +#else +#define dma_outb outb +#endif + +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. + * + */ + +#ifndef CONFIG_GENERIC_ISA_DMA_SUPPORT_BROKEN +#define MAX_DMA_CHANNELS 8 +#endif + +/* + * The maximum address in KSEG0 that we can perform a DMA transfer to on this + * platform. This describes only the PC style part of the DMA logic like on + * Deskstations or Acer PICA but not the much more versatile DMA logic used + * for the local devices on Acer PICA or Magnums. 
+ */ +#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000) +#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS)) +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page registers */ +#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + +extern spinlock_t dma_spin_lock; + +static __inline__ unsigned long claim_dma_lock(void) +{ + unsigned long flags; + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static __inline__ void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static __inline__ void enable_dma(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static __inline__ void disable_dma(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. + * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while holding the DMA lock ! 
--- + */ +static __inline__ void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static __inline__ void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr<=3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr&3), DMA2_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register, but a 64k boundary + * may have been crossed. + */ +static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) +{ + switch(dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + set_dma_page(dmanr, a>>16); + if (dmanr <= 3) { + dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + } else { + dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + } +} + + +/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. + */ +static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + } else { + dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. + */ +static __inline__ int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE + : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr<=3)? 
count : (count<<1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* _ASM_DMA_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/elf.h b/target/linux/realtek/files/arch/rlx/include/asm/elf.h new file mode 100644 index 000000000..b934084b6 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/elf.h @@ -0,0 +1,283 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Much of this is taken from binutils and GNU libc ... + */ +#ifndef _ASM_ELF_H +#define _ASM_ELF_H + + +/* ELF header e_flags defines. */ +/* MIPS architecture level. */ +#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ +#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ +#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ +#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ +#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ +#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */ +#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */ +#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32 R2 code. */ +#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64 R2 code. */ + +/* The ABI of a file. */ +#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ +#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */ + +#define PT_MIPS_REGINFO 0x70000000 +#define PT_MIPS_RTPROC 0x70000001 +#define PT_MIPS_OPTIONS 0x70000002 + +/* Flags in the e_flags field of the header */ +#define EF_MIPS_NOREORDER 0x00000001 +#define EF_MIPS_PIC 0x00000002 +#define EF_MIPS_CPIC 0x00000004 +#define EF_MIPS_ABI2 0x00000020 +#define EF_MIPS_OPTIONS_FIRST 0x00000080 +#define EF_MIPS_32BITMODE 0x00000100 +#define EF_MIPS_ABI 0x0000f000 +#define EF_MIPS_ARCH 0xf0000000 + +#define DT_MIPS_RLD_VERSION 0x70000001 +#define DT_MIPS_TIME_STAMP 0x70000002 +#define DT_MIPS_ICHECKSUM 0x70000003 +#define DT_MIPS_IVERSION 0x70000004 +#define DT_MIPS_FLAGS 0x70000005 + #define RHF_NONE 0x00000000 + #define RHF_HARDWAY 0x00000001 + #define RHF_NOTPOT 0x00000002 + #define RHF_SGI_ONLY 0x00000010 +#define DT_MIPS_BASE_ADDRESS 0x70000006 +#define DT_MIPS_CONFLICT 0x70000008 +#define DT_MIPS_LIBLIST 0x70000009 +#define DT_MIPS_LOCAL_GOTNO 0x7000000a +#define DT_MIPS_CONFLICTNO 0x7000000b +#define DT_MIPS_LIBLISTNO 0x70000010 +#define DT_MIPS_SYMTABNO 0x70000011 +#define DT_MIPS_UNREFEXTNO 0x70000012 +#define DT_MIPS_GOTSYM 0x70000013 +#define DT_MIPS_HIPAGENO 0x70000014 +#define DT_MIPS_RLD_MAP 0x70000016 + +#define R_MIPS_NONE 0 +#define R_MIPS_16 1 +#define R_MIPS_32 2 +#define R_MIPS_REL32 3 +#define R_MIPS_26 4 +#define R_MIPS_HI16 5 +#define R_MIPS_LO16 6 +#define R_MIPS_GPREL16 7 +#define R_MIPS_LITERAL 8 +#define R_MIPS_GOT16 9 +#define R_MIPS_PC16 10 +#define R_MIPS_CALL16 11 +#define R_MIPS_GPREL32 12 +/* The remaining relocs are defined on Irix, although they are not + in the MIPS ELF ABI. 
*/ +#define R_MIPS_UNUSED1 13 +#define R_MIPS_UNUSED2 14 +#define R_MIPS_UNUSED3 15 +#define R_MIPS_SHIFT5 16 +#define R_MIPS_SHIFT6 17 +#define R_MIPS_64 18 +#define R_MIPS_GOT_DISP 19 +#define R_MIPS_GOT_PAGE 20 +#define R_MIPS_GOT_OFST 21 +/* + * The following two relocation types are specified in the MIPS ABI + * conformance guide version 1.2 but not yet in the psABI. + */ +#define R_MIPS_GOTHI16 22 +#define R_MIPS_GOTLO16 23 +#define R_MIPS_SUB 24 +#define R_MIPS_INSERT_A 25 +#define R_MIPS_INSERT_B 26 +#define R_MIPS_DELETE 27 +#define R_MIPS_HIGHER 28 +#define R_MIPS_HIGHEST 29 +/* + * The following two relocation types are specified in the MIPS ABI + * conformance guide version 1.2 but not yet in the psABI. + */ +#define R_MIPS_CALLHI16 30 +#define R_MIPS_CALLLO16 31 +/* + * This range is reserved for vendor specific relocations. + */ +#define R_MIPS_LOVENDOR 100 +#define R_MIPS_HIVENDOR 127 + +#define SHN_MIPS_ACCOMON 0xff00 /* Allocated common symbols */ +#define SHN_MIPS_TEXT 0xff01 /* Allocated test symbols. */ +#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */ +#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */ +#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */ + +#define SHT_MIPS_LIST 0x70000000 +#define SHT_MIPS_CONFLICT 0x70000002 +#define SHT_MIPS_GPTAB 0x70000003 +#define SHT_MIPS_UCODE 0x70000004 +#define SHT_MIPS_DEBUG 0x70000005 +#define SHT_MIPS_REGINFO 0x70000006 +#define SHT_MIPS_PACKAGE 0x70000007 +#define SHT_MIPS_PACKSYM 0x70000008 +#define SHT_MIPS_RELD 0x70000009 +#define SHT_MIPS_IFACE 0x7000000b +#define SHT_MIPS_CONTENT 0x7000000c +#define SHT_MIPS_OPTIONS 0x7000000d +#define SHT_MIPS_SHDR 0x70000010 +#define SHT_MIPS_FDESC 0x70000011 +#define SHT_MIPS_EXTSYM 0x70000012 +#define SHT_MIPS_DENSE 0x70000013 +#define SHT_MIPS_PDESC 0x70000014 +#define SHT_MIPS_LOCSYM 0x70000015 +#define SHT_MIPS_AUXSYM 0x70000016 +#define SHT_MIPS_OPTSYM 0x70000017 +#define SHT_MIPS_LOCSTR 0x70000018 +#define SHT_MIPS_LINE 0x70000019 +#define SHT_MIPS_RFDESC 0x7000001a +#define SHT_MIPS_DELTASYM 0x7000001b +#define SHT_MIPS_DELTAINST 0x7000001c +#define SHT_MIPS_DELTACLASS 0x7000001d +#define SHT_MIPS_DWARF 0x7000001e +#define SHT_MIPS_DELTADECL 0x7000001f +#define SHT_MIPS_SYMBOL_LIB 0x70000020 +#define SHT_MIPS_EVENTS 0x70000021 +#define SHT_MIPS_TRANSLATE 0x70000022 +#define SHT_MIPS_PIXIE 0x70000023 +#define SHT_MIPS_XLATE 0x70000024 +#define SHT_MIPS_XLATE_DEBUG 0x70000025 +#define SHT_MIPS_WHIRL 0x70000026 +#define SHT_MIPS_EH_REGION 0x70000027 +#define SHT_MIPS_XLATE_OLD 0x70000028 +#define SHT_MIPS_PDR_EXCEPTION 0x70000029 + +#define SHF_MIPS_GPREL 0x10000000 +#define SHF_MIPS_MERGE 0x20000000 +#define SHF_MIPS_ADDR 0x40000000 +#define SHF_MIPS_STRING 0x80000000 +#define SHF_MIPS_NOSTRIP 0x08000000 +#define SHF_MIPS_LOCAL 0x04000000 +#define SHF_MIPS_NAMES 0x02000000 +#define SHF_MIPS_NODUPES 0x01000000 + +#ifndef ELF_ARCH +/* ELF register definitions */ +#define ELF_NGREG 45 +#define ELF_NFPREG 33 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +/* + * This is used to ensure we don't load something for the wrong architecture. 
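 * In practice only 32-bit o32 MIPS images pass: the macro below rejects
 * anything that is not EM_MIPS and ELFCLASS32, anything with the
 * EF_MIPS_ABI2 (N32) flag set, and any non-zero EF_MIPS_ABI field other
 * than EF_MIPS_ABI_O32.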
+ */ +#define elf_check_arch(hdr) \ +({ \ + int __res = 1; \ + struct elfhdr *__h = (hdr); \ + \ + if (__h->e_machine != EM_MIPS) \ + __res = 0; \ + if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ + __res = 0; \ + if ((__h->e_flags & EF_MIPS_ABI2) != 0) \ + __res = 0; \ + if (((__h->e_flags & EF_MIPS_ABI) != 0) && \ + ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \ + __res = 0; \ + \ + __res; \ +}) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 + +/* + * These are used to set parameters in the core dumps. + */ +#ifdef CONFIG_CPU_BIG_ENDIAN +#define ELF_DATA ELFDATA2MSB +#else +#define ELF_DATA ELFDATA2LSB +#endif +#define ELF_ARCH EM_MIPS + +#endif /* !defined(ELF_ARCH) */ + +struct mips_abi; + +extern struct mips_abi mips_abi; +extern struct mips_abi mips_abi_32; +extern struct mips_abi mips_abi_n32; + +#define SET_PERSONALITY(ex) \ +do { \ + set_personality(PER_LINUX); \ + \ + current->thread.abi = &mips_abi; \ +} while (0) + +struct task_struct; + +extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs); +extern int dump_task_regs(struct task_struct *, elf_gregset_t *); +extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); + +#ifndef ELF_CORE_COPY_REGS +#define ELF_CORE_COPY_REGS(elf_regs, regs) \ + elf_dump_regs((elf_greg_t *)&(elf_regs), regs); +#endif +#ifndef ELF_CORE_COPY_TASK_REGS +#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) +#endif + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This could be done in userspace, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... */ + +#define ELF_PLATFORM (NULL) + +/* + * See comments in asm-alpha/elf.h, this is the same thing + * on the MIPS. + */ +#define ELF_PLAT_INIT(_r, load_addr) do { \ + _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \ + _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \ + _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \ + _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \ + _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \ + _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \ + _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \ + _r->regs[30] = _r->regs[31] = 0; \ +} while (0) + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 
*/ + +#ifndef ELF_ET_DYN_BASE +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#endif + +#endif /* _ASM_ELF_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/emergency-restart.h b/target/linux/realtek/files/arch/rlx/include/asm/emergency-restart.h new file mode 100644 index 000000000..108d8c48e --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_EMERGENCY_RESTART_H +#define _ASM_EMERGENCY_RESTART_H + +#include <asm-generic/emergency-restart.h> + +#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/errno.h b/target/linux/realtek/files/arch/rlx/include/asm/errno.h new file mode 100644 index 000000000..7f3e15769 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/errno.h @@ -0,0 +1,132 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle + */ +#ifndef _ASM_ERRNO_H +#define _ASM_ERRNO_H + +/* + * These error numbers are intended to be MIPS ABI compatible + */ + +#include <asm-generic/errno-base.h> + +#define ENOMSG 35 /* No message of desired type */ +#define EIDRM 36 /* Identifier removed */ +#define ECHRNG 37 /* Channel number out of range */ +#define EL2NSYNC 38 /* Level 2 not synchronized */ +#define EL3HLT 39 /* Level 3 halted */ +#define EL3RST 40 /* Level 3 reset */ +#define ELNRNG 41 /* Link number out of range */ +#define EUNATCH 42 /* Protocol driver not attached */ +#define ENOCSI 43 /* No CSI structure available */ +#define EL2HLT 44 /* Level 2 halted */ +#define EDEADLK 45 /* Resource deadlock would occur */ +#define ENOLCK 46 /* No record locks available */ +#define EBADE 50 /* Invalid exchange */ +#define EBADR 51 /* Invalid request descriptor */ +#define EXFULL 52 /* Exchange full */ +#define ENOANO 53 /* No anode */ +#define EBADRQC 54 /* Invalid request code */ +#define EBADSLT 55 /* Invalid slot */ +#define EDEADLOCK 56 /* File locking deadlock error */ +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EDOTDOT 73 /* RFS specific error */ +#define EMULTIHOP 74 /* Multihop attempted */ +#define EBADMSG 77 /* Not a data message */ +#define ENAMETOOLONG 78 /* File name too long */ +#define EOVERFLOW 79 /* Value too large for defined data type */ +#define ENOTUNIQ 80 /* Name not unique on network */ +#define EBADFD 81 /* File descriptor in bad state */ +#define EREMCHG 82 /* Remote address changed */ +#define ELIBACC 83 /* Can not access a needed shared library */ +#define ELIBBAD 84 /* Accessing a corrupted shared library */ +#define ELIBSCN 85 /* .lib section in a.out corrupted */ +#define ELIBMAX 86 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 87 /* Cannot exec a shared library directly */ +#define EILSEQ 88 /* Illegal byte sequence */ +#define ENOSYS 89 /* Function not implemented */ 
+#define ELOOP 90 /* Too many symbolic links encountered */ +#define ERESTART 91 /* Interrupted system call should be restarted */ +#define ESTRPIPE 92 /* Streams pipe error */ +#define ENOTEMPTY 93 /* Directory not empty */ +#define EUSERS 94 /* Too many users */ +#define ENOTSOCK 95 /* Socket operation on non-socket */ +#define EDESTADDRREQ 96 /* Destination address required */ +#define EMSGSIZE 97 /* Message too long */ +#define EPROTOTYPE 98 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 99 /* Protocol not available */ +#define EPROTONOSUPPORT 120 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 121 /* Socket type not supported */ +#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 123 /* Protocol family not supported */ +#define EAFNOSUPPORT 124 /* Address family not supported by protocol */ +#define EADDRINUSE 125 /* Address already in use */ +#define EADDRNOTAVAIL 126 /* Cannot assign requested address */ +#define ENETDOWN 127 /* Network is down */ +#define ENETUNREACH 128 /* Network is unreachable */ +#define ENETRESET 129 /* Network dropped connection because of reset */ +#define ECONNABORTED 130 /* Software caused connection abort */ +#define ECONNRESET 131 /* Connection reset by peer */ +#define ENOBUFS 132 /* No buffer space available */ +#define EISCONN 133 /* Transport endpoint is already connected */ +#define ENOTCONN 134 /* Transport endpoint is not connected */ +#define EUCLEAN 135 /* Structure needs cleaning */ +#define ENOTNAM 137 /* Not a XENIX named type file */ +#define ENAVAIL 138 /* No XENIX semaphores available */ +#define EISNAM 139 /* Is a named type file */ +#define EREMOTEIO 140 /* Remote I/O error */ +#define EINIT 141 /* Reserved */ +#define EREMDEV 142 /* Error 142 */ +#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 144 /* Too many references: cannot splice */ +#define ETIMEDOUT 145 /* Connection timed out */ +#define ECONNREFUSED 146 /* Connection refused */ +#define EHOSTDOWN 147 /* Host is down */ +#define EHOSTUNREACH 148 /* No route to host */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EALREADY 149 /* Operation already in progress */ +#define EINPROGRESS 150 /* Operation now in progress */ +#define ESTALE 151 /* Stale NFS file handle */ +#define ECANCELED 158 /* AIO operation canceled */ + +/* + * These error are Linux extensions. + */ +#define ENOMEDIUM 159 /* No medium found */ +#define EMEDIUMTYPE 160 /* Wrong medium type */ +#define ENOKEY 161 /* Required key not available */ +#define EKEYEXPIRED 162 /* Key has expired */ +#define EKEYREVOKED 163 /* Key has been revoked */ +#define EKEYREJECTED 164 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 165 /* Owner died */ +#define ENOTRECOVERABLE 166 /* State not recoverable */ + +#define ERFKILL 167 /* Operation not possible due to RF-kill */ +#define EDQUOT 1133 /* Quota exceeded */ + +#ifdef __KERNEL__ + +/* The biggest error number defined here or in <linux/errno.h>. 
*/ +#define EMAXERRNO 1133 + +#endif /* __KERNEL__ */ + +#endif /* _ASM_ERRNO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/fb.h b/target/linux/realtek/files/arch/rlx/include/asm/fb.h new file mode 100644 index 000000000..bd3f68c9d --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/fb.h @@ -0,0 +1,19 @@ +#ifndef _ASM_FB_H_ +#define _ASM_FB_H_ + +#include <linux/fb.h> +#include <linux/fs.h> +#include <asm/page.h> + +static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, + unsigned long off) +{ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +} + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* _ASM_FB_H_ */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/fcntl.h b/target/linux/realtek/files/arch/rlx/include/asm/fcntl.h new file mode 100644 index 000000000..ea13798c6 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/fcntl.h @@ -0,0 +1,57 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 97, 98, 99, 2003, 05 Ralf Baechle + */ +#ifndef _ASM_FCNTL_H +#define _ASM_FCNTL_H + + +#define O_APPEND 0x0008 +#define O_SYNC 0x0010 +#define O_NONBLOCK 0x0080 +#define O_CREAT 0x0100 /* not fcntl */ +#define O_TRUNC 0x0200 /* not fcntl */ +#define O_EXCL 0x0400 /* not fcntl */ +#define O_NOCTTY 0x0800 /* not fcntl */ +#define FASYNC 0x1000 /* fcntl, for BSD compatibility */ +#define O_LARGEFILE 0x2000 /* allow large file opens */ +#define O_DIRECT 0x8000 /* direct disk access hint */ + +#define F_GETLK 14 +#define F_SETLK 6 +#define F_SETLKW 7 + +#define F_SETOWN 24 /* for sockets. */ +#define F_GETOWN 23 /* for sockets. */ + +#ifndef __mips64 +#define F_GETLK64 33 /* using 'struct flock64' */ +#define F_SETLK64 34 +#define F_SETLKW64 35 +#endif + +/* + * The flavours of struct flock. "struct flock" is the ABI compliant + * variant. Finally struct flock64 is the LFS variant of struct flock. As + * a historic accident and inconsistence with the ABI definition it doesn't + * contain all the same fields as struct flock. + */ + +struct flock { + short l_type; + short l_whence; + off_t l_start; + off_t l_len; + long l_sysid; + __kernel_pid_t l_pid; + long pad[4]; +}; + +#define HAVE_ARCH_STRUCT_FLOCK + +#include <asm-generic/fcntl.h> + +#endif /* _ASM_FCNTL_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/fixmap.h b/target/linux/realtek/files/arch/rlx/include/asm/fixmap.h new file mode 100644 index 000000000..1e0f590bd --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/fixmap.h @@ -0,0 +1,113 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +#include <asm/page.h> +#ifdef CONFIG_HIGHMEM +#include <linux/threads.h> +#include <asm/kmap_types.h> +#endif + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. 
We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * higher than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. + */ + +/* + * on UP currently we will have no trace of the fixmap mechanism, + * no page table allocations, etc. This might change in the + * future, say framebuffers for the console driver(s) could be + * fix-mapped? + */ +enum fixed_addresses { +#define FIX_N_COLOURS 8 + FIX_CMAP_BEGIN, + FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, +#ifdef CONFIG_HIGHMEM + /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_BEGIN = FIX_CMAP_END + 1, + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, +#endif + __end_of_fixed_addresses +}; + +/* + * used by vmalloc.c. + * + * Leave one empty page between vmalloc'ed areas and + * the start of the fixmap, and leave one page empty + * at the top of mem.. + */ +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +extern void __this_fixmap_does_not_exist(void); + +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without translation, we catch the bug with a NULL-dereference + * kernel oops. Illegal ranges of incoming indices are caught too. + */ +static inline unsigned long fix_to_virt(const unsigned int idx) +{ + /* + * this branch gets completely eliminated after inlining, + * except when someone tries to use fixaddr indices in an + * illegal way. (such as mixing up address types or using + * out-of-range indices). + * + * If it doesn't get removed, the linker will complain + * loudly with a reasonably clear error message.. + */ + if (idx >= __end_of_fixed_addresses) + __this_fixmap_does_not_exist(); + + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +#define kmap_get_fixmap_pte(vaddr) \ + pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) + +/* + * Called from pgtable_init() + */ +extern void fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base); + + +#endif diff --git a/target/linux/realtek/files/arch/rlx/include/asm/floppy.h b/target/linux/realtek/files/arch/rlx/include/asm/floppy.h new file mode 100644 index 000000000..992d232ad --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/floppy.h @@ -0,0 +1,56 @@ +/* + * Architecture specific parts of the Floppy driver + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1995 - 2000 Ralf Baechle + */ +#ifndef _ASM_FLOPPY_H +#define _ASM_FLOPPY_H + +#include <linux/dma-mapping.h> + +static inline void fd_cacheflush(char * addr, long size) +{ + dma_cache_sync(NULL, addr, size, DMA_BIDIRECTIONAL); +} + +#define MAX_BUFFER_SECTORS 24 + + +/* + * And on MIPS the CMOS info fails as well ... + * + * FIXME: This information should come from the ARC configuration tree + * or wherever a particular machine has stored this ... + */ +#define FLOPPY0_TYPE fd_drive_type(0) +#define FLOPPY1_TYPE fd_drive_type(1) + +#define FDC1 fd_getfdaddr1(); + +#define N_FDC 1 /* do you *really* want a second controller? */ +#define N_DRIVE 8 + +/* + * The DMA channel used by the floppy controller cannot access data at + * addresses >= 16MB + * + * Went back to the 1MB limit, as some people had problems with the floppy + * driver otherwise. It doesn't matter much for performance anyway, as most + * floppy accesses go through the track buffer. + * + * On MIPSes using vdma, this actually means that *all* transfers go through + * the track buffer since 0x1000000 is always smaller than KSEG0/1. + * Actually this needs to be a bit more complicated since so much different + * hardware is available with MIPS CPUs ... + */ +#define CROSS_64KB(a, s) ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64) + +#define EXTRA_FLOPPY_PARAMS + +#include <floppy.h> + +#endif /* _ASM_FLOPPY_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/ftrace.h b/target/linux/realtek/files/arch/rlx/include/asm/ftrace.h new file mode 100644 index 000000000..40a8c178f --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/ftrace.h @@ -0,0 +1 @@ +/* empty */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/futex.h b/target/linux/realtek/files/arch/rlx/include/asm/futex.h new file mode 100644 index 000000000..bab40d064 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/futex.h @@ -0,0 +1,160 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/barrier.h> +#include <asm/errno.h> + +#ifdef CONFIG_CPU_HAS_LLSC +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +{ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .set mips3 \n" \ + "1: ll %1, %4 # __futex_atomic_op \n" \ +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" \ +#endif + " .set mips0 \n" \ + " " insn " \n" \ + " .set mips3 \n" \ + "2: sc $1, %2 \n" \ + " beqz $1, 1b \n" \ + __WEAK_LLSC_MB \ + "3: \n" \ + " .set pop \n" \ + " .set mips0 \n" \ + " .section .fixup,\"ax\" \n" \ + "4: li %0, %6 \n" \ + " j 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " "__UA_ADDR "\t1b, 4b \n" \ + " "__UA_ADDR "\t2b, 4b \n" \ + " .previous \n" \ + : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ + : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ + : "memory"); \ +} +#else +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +{ \ + ret = -ENOSYS; \ +} +#endif + +static inline int +futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg); + break; + + case FUTEX_OP_ADD: + __futex_atomic_op("addu $1, %1, %z5", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or $1, %1, %z5", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("and $1, %1, %z5", + ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor $1, %1, %z5", + ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + int retval; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + +#ifdef CONFIG_CPU_HAS_LLSC + __asm__ __volatile__( + "# futex_atomic_cmpxchg_inatomic \n" + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: ll %0, %2 \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " bne %0, %z3, 3f \n" + " .set mips0 \n" + " move $1, %z4 \n" + " .set mips3 \n" + "2: sc $1, %1 \n" + " beqz $1, 1b \n" + __WEAK_LLSC_MB + "3: \n" + " .set pop \n" + " .section .fixup,\"ax\" \n" + "4: li %0, %5 \n" + " j 3b \n" + " .previous \n" + " .section __ex_table,\"a\" \n" + " "__UA_ADDR "\t1b, 4b \n" + " "__UA_ADDR "\t2b, 4b \n" + " .previous \n" + : "=&r" (retval), "=R" (*uaddr) + : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) + : "memory"); 
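+	/* The ll/sc sequence above loads the current value, bails out to label 3 if it does not match oldval, and otherwise stores newval, retrying from the ll if the store-conditional fails; a faulting user access is redirected through the .fixup entry and returns -EFAULT. */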
+#else + return -ENOSYS; +#endif + + return retval; +} + +#endif +#endif /* _ASM_FUTEX_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/fw/arc/hinv.h b/target/linux/realtek/files/arch/rlx/include/asm/fw/arc/hinv.h new file mode 100644 index 000000000..e6ff4add0 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/fw/arc/hinv.h @@ -0,0 +1,175 @@ +/* + * ARCS hardware/memory inventory/configuration and system ID definitions. + */ +#ifndef _ASM_ARC_HINV_H +#define _ASM_ARC_HINV_H + +#include <asm/sgidefs.h> +#include <asm/fw/arc/types.h> + +/* configuration query defines */ +typedef enum configclass { + SystemClass, + ProcessorClass, + CacheClass, +#ifndef _NT_PROM + MemoryClass, + AdapterClass, + ControllerClass, + PeripheralClass +#else /* _NT_PROM */ + AdapterClass, + ControllerClass, + PeripheralClass, + MemoryClass +#endif /* _NT_PROM */ +} CONFIGCLASS; + +typedef enum configtype { + ARC, + CPU, + FPU, + PrimaryICache, + PrimaryDCache, + SecondaryICache, + SecondaryDCache, + SecondaryCache, +#ifndef _NT_PROM + Memory, +#endif + EISAAdapter, + TCAdapter, + SCSIAdapter, + DTIAdapter, + MultiFunctionAdapter, + DiskController, + TapeController, + CDROMController, + WORMController, + SerialController, + NetworkController, + DisplayController, + ParallelController, + PointerController, + KeyboardController, + AudioController, + OtherController, + DiskPeripheral, + FloppyDiskPeripheral, + TapePeripheral, + ModemPeripheral, + MonitorPeripheral, + PrinterPeripheral, + PointerPeripheral, + KeyboardPeripheral, + TerminalPeripheral, + LinePeripheral, + NetworkPeripheral, +#ifdef _NT_PROM + Memory, +#endif + OtherPeripheral, + + /* new stuff for IP30 */ + /* added without moving anything */ + /* except ANONYMOUS. */ + + XTalkAdapter, + PCIAdapter, + GIOAdapter, + TPUAdapter, + + Anonymous +} CONFIGTYPE; + +typedef enum { + Failed = 1, + ReadOnly = 2, + Removable = 4, + ConsoleIn = 8, + ConsoleOut = 16, + Input = 32, + Output = 64 +} IDENTIFIERFLAG; + +#ifndef NULL /* for GetChild(NULL); */ +#define NULL 0 +#endif + +union key_u { + struct { +#ifdef _MIPSEB + unsigned char c_bsize; /* block size in lines */ + unsigned char c_lsize; /* line size in bytes/tag */ + unsigned short c_size; /* cache size in 4K pages */ +#else /* _MIPSEL */ + unsigned short c_size; /* cache size in 4K pages */ + unsigned char c_lsize; /* line size in bytes/tag */ + unsigned char c_bsize; /* block size in lines */ +#endif /* _MIPSEL */ + } cache; + ULONG FullKey; +}; + +#if _MIPS_SIM == _MIPS_SIM_ABI64 +#define SGI_ARCS_VERS 64 /* sgi 64-bit version */ +#define SGI_ARCS_REV 0 /* rev .00 */ +#else +#define SGI_ARCS_VERS 1 /* first version */ +#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */ +#endif + +typedef struct component { + CONFIGCLASS Class; + CONFIGTYPE Type; + IDENTIFIERFLAG Flags; + USHORT Version; + USHORT Revision; + ULONG Key; + ULONG AffinityMask; + ULONG ConfigurationDataSize; + ULONG IdentifierLength; + char *Identifier; +} COMPONENT; + +/* internal structure that holds pathname parsing data */ +struct cfgdata { + char *name; /* full name */ + int minlen; /* minimum length to match */ + CONFIGTYPE type; /* type of token */ +}; + +/* System ID */ +typedef struct systemid { + CHAR VendorId[8]; + CHAR ProductId[8]; +} SYSTEMID; + +/* memory query functions */ +typedef enum memorytype { + ExceptionBlock, + SPBPage, /* ARCS == SystemParameterBlock */ +#ifndef _NT_PROM + FreeContiguous, + FreeMemory, + BadMemory, + LoadedProgram, + FirmwareTemporary, + FirmwarePermanent +#else /* 
_NT_PROM */ + FreeMemory, + BadMemory, + LoadedProgram, + FirmwareTemporary, + FirmwarePermanent, + FreeContiguous +#endif /* _NT_PROM */ +} MEMORYTYPE; + +typedef struct memorydescriptor { + MEMORYTYPE Type; + LONG BasePage; + LONG PageCount; +} MEMORYDESCRIPTOR; + +#endif /* _ASM_ARC_HINV_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/fw/arc/types.h b/target/linux/realtek/files/arch/rlx/include/asm/fw/arc/types.h new file mode 100644 index 000000000..b9adcd6f0 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/fw/arc/types.h @@ -0,0 +1,86 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright 1999 Ralf Baechle (ralf@gnu.org) + * Copyright 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_ARC_TYPES_H +#define _ASM_ARC_TYPES_H + + +#ifdef CONFIG_ARC32 + +typedef char CHAR; +typedef short SHORT; +typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__))); +typedef long LONG __attribute__ ((__mode__ (__SI__))); +typedef unsigned char UCHAR; +typedef unsigned short USHORT; +typedef unsigned long ULONG __attribute__ ((__mode__ (__SI__))); +typedef void VOID; + +/* The pointer types. Note that we're using a 64-bit compiler but all + pointer in the ARC structures are only 32-bit, so we need some disgusting + workarounds. Keep your vomit bag handy. */ +typedef LONG _PCHAR; +typedef LONG _PSHORT; +typedef LONG _PLARGE_INTEGER; +typedef LONG _PLONG; +typedef LONG _PUCHAR; +typedef LONG _PUSHORT; +typedef LONG _PULONG; +typedef LONG _PVOID; + +#endif /* CONFIG_ARC32 */ + +#ifdef CONFIG_ARC64 + +typedef char CHAR; +typedef short SHORT; +typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__))); +typedef long LONG __attribute__ ((__mode__ (__DI__))); +typedef unsigned char UCHAR; +typedef unsigned short USHORT; +typedef unsigned long ULONG __attribute__ ((__mode__ (__DI__))); +typedef void VOID; + +/* The pointer types. We're 64-bit and the firmware is also 64-bit, so + live is sane ... */ +typedef CHAR *_PCHAR; +typedef SHORT *_PSHORT; +typedef LARGE_INTEGER *_PLARGE_INTEGER; +typedef LONG *_PLONG; +typedef UCHAR *_PUCHAR; +typedef USHORT *_PUSHORT; +typedef ULONG *_PULONG; +typedef VOID *_PVOID; + +#endif /* CONFIG_ARC64 */ + +typedef CHAR *PCHAR; +typedef SHORT *PSHORT; +typedef LARGE_INTEGER *PLARGE_INTEGER; +typedef LONG *PLONG; +typedef UCHAR *PUCHAR; +typedef USHORT *PUSHORT; +typedef ULONG *PULONG; +typedef VOID *PVOID; + +/* + * Return type of ArcGetDisplayStatus() + */ +typedef struct { + USHORT CursorXPosition; + USHORT CursorYPosition; + USHORT CursorMaxXPosition; + USHORT CursorMaxYPosition; + USHORT ForegroundColor; + USHORT BackgroundColor; + UCHAR HighIntensity; + UCHAR Underscored; + UCHAR ReverseVideo; +} DISPLAY_STATUS; + +#endif /* _ASM_ARC_TYPES_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/fw/cfe/cfe_api.h b/target/linux/realtek/files/arch/rlx/include/asm/fw/cfe/cfe_api.h new file mode 100644 index 000000000..0995575db --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/fw/cfe/cfe_api.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2000, 2001, 2002 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +/* + * Broadcom Common Firmware Environment (CFE) + * + * This file contains declarations for doing callbacks to + * cfe from an application. It should be the only header + * needed by the application to use this library + * + * Authors: Mitch Lichtenberg, Chris Demetriou + */ +#ifndef CFE_API_H +#define CFE_API_H + +#include <linux/types.h> +#include <linux/string.h> + +typedef long intptr_t; + + +/* + * Constants + */ + +/* Seal indicating CFE's presence, passed to user program. */ +#define CFE_EPTSEAL 0x43464531 + +#define CFE_MI_RESERVED 0 /* memory is reserved, do not use */ +#define CFE_MI_AVAILABLE 1 /* memory is available */ + +#define CFE_FLG_WARMSTART 0x00000001 +#define CFE_FLG_FULL_ARENA 0x00000001 +#define CFE_FLG_ENV_PERMANENT 0x00000001 + +#define CFE_CPU_CMD_START 1 +#define CFE_CPU_CMD_STOP 0 + +#define CFE_STDHANDLE_CONSOLE 0 + +#define CFE_DEV_NETWORK 1 +#define CFE_DEV_DISK 2 +#define CFE_DEV_FLASH 3 +#define CFE_DEV_SERIAL 4 +#define CFE_DEV_CPU 5 +#define CFE_DEV_NVRAM 6 +#define CFE_DEV_CLOCK 7 +#define CFE_DEV_OTHER 8 +#define CFE_DEV_MASK 0x0F + +#define CFE_CACHE_FLUSH_D 1 +#define CFE_CACHE_INVAL_I 2 +#define CFE_CACHE_INVAL_D 4 +#define CFE_CACHE_INVAL_L2 8 + +#define CFE_FWI_64BIT 0x00000001 +#define CFE_FWI_32BIT 0x00000002 +#define CFE_FWI_RELOC 0x00000004 +#define CFE_FWI_UNCACHED 0x00000008 +#define CFE_FWI_MULTICPU 0x00000010 +#define CFE_FWI_FUNCSIM 0x00000020 +#define CFE_FWI_RTLSIM 0x00000040 + +typedef struct { + int64_t fwi_version; /* major, minor, eco version */ + int64_t fwi_totalmem; /* total installed mem */ + int64_t fwi_flags; /* various flags */ + int64_t fwi_boardid; /* board ID */ + int64_t fwi_bootarea_va; /* VA of boot area */ + int64_t fwi_bootarea_pa; /* PA of boot area */ + int64_t fwi_bootarea_size; /* size of boot area */ +} cfe_fwinfo_t; + + +/* + * Defines and prototypes for functions which take no arguments. + */ +int64_t cfe_getticks(void); + +/* + * Defines and prototypes for the rest of the functions. 
+ */ +int cfe_close(int handle); +int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1); +int cfe_cpu_stop(int cpu); +int cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen); +int cfe_enummem(int idx, int flags, uint64_t * start, uint64_t * length, + uint64_t * type); +int cfe_exit(int warm, int status); +int cfe_flushcache(int flg); +int cfe_getdevinfo(char *name); +int cfe_getenv(char *name, char *dest, int destlen); +int cfe_getfwinfo(cfe_fwinfo_t * info); +int cfe_getstdhandle(int flg); +int cfe_init(uint64_t handle, uint64_t ept); +int cfe_inpstat(int handle); +int cfe_ioctl(int handle, unsigned int ioctlnum, unsigned char *buffer, + int length, int *retlen, uint64_t offset); +int cfe_open(char *name); +int cfe_read(int handle, unsigned char *buffer, int length); +int cfe_readblk(int handle, int64_t offset, unsigned char *buffer, + int length); +int cfe_setenv(char *name, char *val); +int cfe_write(int handle, unsigned char *buffer, int length); +int cfe_writeblk(int handle, int64_t offset, unsigned char *buffer, + int length); + +#endif /* CFE_API_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/fw/cfe/cfe_error.h b/target/linux/realtek/files/arch/rlx/include/asm/fw/cfe/cfe_error.h new file mode 100644 index 000000000..b80374636 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/fw/cfe/cfe_error.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2000, 2001, 2002 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + * Broadcom Common Firmware Environment (CFE) + * + * CFE's global error code list is here. 
+ * + * Author: Mitch Lichtenberg + */ + +#define CFE_OK 0 +#define CFE_ERR -1 /* generic error */ +#define CFE_ERR_INV_COMMAND -2 +#define CFE_ERR_EOF -3 +#define CFE_ERR_IOERR -4 +#define CFE_ERR_NOMEM -5 +#define CFE_ERR_DEVNOTFOUND -6 +#define CFE_ERR_DEVOPEN -7 +#define CFE_ERR_INV_PARAM -8 +#define CFE_ERR_ENVNOTFOUND -9 +#define CFE_ERR_ENVREADONLY -10 + +#define CFE_ERR_NOTELF -11 +#define CFE_ERR_NOT32BIT -12 +#define CFE_ERR_WRONGENDIAN -13 +#define CFE_ERR_BADELFVERS -14 +#define CFE_ERR_NOTMIPS -15 +#define CFE_ERR_BADELFFMT -16 +#define CFE_ERR_BADADDR -17 + +#define CFE_ERR_FILENOTFOUND -18 +#define CFE_ERR_UNSUPPORTED -19 + +#define CFE_ERR_HOSTUNKNOWN -20 + +#define CFE_ERR_TIMEOUT -21 + +#define CFE_ERR_PROTOCOLERR -22 + +#define CFE_ERR_NETDOWN -23 +#define CFE_ERR_NONAMESERVER -24 + +#define CFE_ERR_NOHANDLES -25 +#define CFE_ERR_ALREADYBOUND -26 + +#define CFE_ERR_CANNOTSET -27 +#define CFE_ERR_NOMORE -28 +#define CFE_ERR_BADFILESYS -29 +#define CFE_ERR_FSNOTAVAIL -30 + +#define CFE_ERR_INVBOOTBLOCK -31 +#define CFE_ERR_WRONGDEVTYPE -32 +#define CFE_ERR_BBCHECKSUM -33 +#define CFE_ERR_BOOTPROGCHKSUM -34 + +#define CFE_ERR_LDRNOTAVAIL -35 + +#define CFE_ERR_NOTREADY -36 + +#define CFE_ERR_GETMEM -37 +#define CFE_ERR_SETMEM -38 + +#define CFE_ERR_NOTCONN -39 +#define CFE_ERR_ADDRINUSE -40 diff --git a/target/linux/realtek/files/arch/rlx/include/asm/gpio.h b/target/linux/realtek/files/arch/rlx/include/asm/gpio.h new file mode 100644 index 000000000..06e46faf8 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/gpio.h @@ -0,0 +1,6 @@ +#ifndef __ASM_MIPS_GPIO_H +#define __ASM_MIPS_GPIO_H + +#include <gpio.h> + +#endif /* __ASM_MIPS_GPIO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/hardirq.h b/target/linux/realtek/files/arch/rlx/include/asm/hardirq.h new file mode 100644 index 000000000..90bf399e6 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/hardirq.h @@ -0,0 +1,24 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1997, 98, 99, 2000, 01, 05 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#ifndef _ASM_HARDIRQ_H +#define _ASM_HARDIRQ_H + +#include <linux/threads.h> +#include <linux/irq.h> + +typedef struct { + unsigned int __softirq_pending; +} ____cacheline_aligned irq_cpustat_t; + +#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ + +extern void ack_bad_irq(unsigned int irq); + +#endif /* _ASM_HARDIRQ_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/hazards.h b/target/linux/realtek/files/arch/rlx/include/asm/hazards.h new file mode 100644 index 000000000..5a3542960 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/hazards.h @@ -0,0 +1,66 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) MIPS Technologies, Inc. + * written by Ralf Baechle <ralf@linux-mips.org> + */ +#ifndef _ASM_HAZARDS_H +#define _ASM_HAZARDS_H + +#ifdef __ASSEMBLY__ +#define ASMMACRO(name, code...) .macro name; code; .endm +#else + +#include <asm/cpu-features.h> + +#define ASMMACRO(name, code...) 
\ +__asm__(".macro " #name "; " #code "; .endm"); \ + \ +static inline void name(void) \ +{ \ + __asm__ __volatile__ (#name); \ +} + +#endif + +ASMMACRO(_ssnop, + sll $0, $0, 1 + ) + +ASMMACRO(_ehb, + sll $0, $0, 3 + ) + +/* + * Finally the catchall case for all other processors including R4000, R4400, + * R4600, R4700, R5000, RM7000, NEC VR41xx etc. + * + * The taken branch will result in a two cycle penalty for the two killed + * instructions on R4000 / R4400. Other processors only have a single cycle + * hazard so this is nice trick to have an optimal code for a range of + * processors. + */ +ASMMACRO(mtc0_tlbw_hazard, + nop; nop + ) +ASMMACRO(tlbw_use_hazard, + nop; nop; nop + ) +ASMMACRO(tlb_probe_hazard, + nop; nop; nop + ) +ASMMACRO(irq_enable_hazard, + _ssnop; _ssnop; _ssnop; + ) +ASMMACRO(irq_disable_hazard, + nop; nop; nop + ) +ASMMACRO(back_to_back_c0_hazard, + _ssnop; _ssnop; _ssnop; + ) +#define instruction_hazard() do { } while (0) + +#endif /* _ASM_HAZARDS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/highmem.h b/target/linux/realtek/files/arch/rlx/include/asm/highmem.h new file mode 100644 index 000000000..25adfb029 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/highmem.h @@ -0,0 +1,69 @@ +/* + * highmem.h: virtual kernel memory mappings for high memory + * + * Used in CONFIG_HIGHMEM systems for memory pages which + * are not addressable by direct kernel virtual addresses. + * + * Copyright (C) 1999 Gerhard Wichert, Siemens AG + * Gerhard.Wichert@pdb.siemens.de + * + * + * Redesigned the x86 32-bit VM architecture to deal with + * up to 16 Terabyte physical memory. With current x86 CPUs + * we now support up to 64 Gigabytes physical RAM. + * + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> + */ +#ifndef _ASM_HIGHMEM_H +#define _ASM_HIGHMEM_H + +#ifdef __KERNEL__ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/uaccess.h> +#include <asm/kmap_types.h> + +/* undef for production */ +#define HIGHMEM_DEBUG 1 + +/* declarations for highmem.c */ +extern unsigned long highstart_pfn, highend_pfn; + +extern pte_t *pkmap_page_table; + +/* + * Right now we initialize only a single pte table. It can be extended + * easily, subsequent pte tables have to be allocated in one physical + * chunk of RAM. 
+ */ +#define LAST_PKMAP 1024 +#define LAST_PKMAP_MASK (LAST_PKMAP-1) +#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +extern void * kmap_high(struct page *page); +extern void kunmap_high(struct page *page); + +extern void *__kmap(struct page *page); +extern void __kunmap(struct page *page); +extern void *__kmap_atomic(struct page *page, enum km_type type); +extern void __kunmap_atomic(void *kvaddr, enum km_type type); +extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); +extern struct page *__kmap_atomic_to_page(void *ptr); + +#define kmap __kmap +#define kunmap __kunmap +#define kmap_atomic __kmap_atomic +#define kunmap_atomic __kunmap_atomic +#define kmap_atomic_to_page __kmap_atomic_to_page + +#define flush_cache_kmaps() flush_cache_all() + +extern void kmap_init(void); + +#define kmap_prot PAGE_KERNEL + +#endif /* __KERNEL__ */ + +#endif /* _ASM_HIGHMEM_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/hw_irq.h b/target/linux/realtek/files/arch/rlx/include/asm/hw_irq.h new file mode 100644 index 000000000..7f1281c9f --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/hw_irq.h @@ -0,0 +1,18 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 2001, 2002 by Ralf Baechle + */ +#ifndef __ASM_HW_IRQ_H +#define __ASM_HW_IRQ_H + +#include <asm/atomic.h> + +/* + * interrupt-retrigger: NOP for now. This may not be apropriate for all + * machines, we'll see ... + */ + +#endif /* __ASM_HW_IRQ_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/ide.h b/target/linux/realtek/files/arch/rlx/include/asm/ide.h new file mode 100644 index 000000000..bb674c3b0 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/ide.h @@ -0,0 +1,13 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * This file contains the MIPS architecture specific IDE code. + */ +#ifndef __ASM_IDE_H +#define __ASM_IDE_H + +#include <ide.h> + +#endif /* __ASM_IDE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/inst.h b/target/linux/realtek/files/arch/rlx/include/asm/inst.h new file mode 100644 index 000000000..6489f0073 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/inst.h @@ -0,0 +1,394 @@ +/* + * Format of an instruction in memory. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 2000 by Ralf Baechle + * Copyright (C) 2006 by Thiemo Seufer + */ +#ifndef _ASM_INST_H +#define _ASM_INST_H + +/* + * Major opcodes; before MIPS IV cop1x was called cop3. 
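+ * The enumerators below follow the encoding of bits 31..26 of an instruction word (spec_op is 0, bcond_op is 1, and so on); MIPSInst_OPCODE() near the end of this file extracts that field.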
+ */ +enum major_op { + spec_op, bcond_op, j_op, jal_op, + beq_op, bne_op, blez_op, bgtz_op, + addi_op, addiu_op, slti_op, sltiu_op, + andi_op, ori_op, xori_op, lui_op, + cop0_op, cop1_op, cop2_op, cop1x_op, + beql_op, bnel_op, blezl_op, bgtzl_op, + daddi_op, daddiu_op, ldl_op, ldr_op, + spec2_op, jalx_op, mdmx_op, spec3_op, + lb_op, lh_op, lwl_op, lw_op, + lbu_op, lhu_op, lwr_op, lwu_op, + sb_op, sh_op, swl_op, sw_op, + sdl_op, sdr_op, swr_op, cache_op, + ll_op, lwc1_op, lwc2_op, pref_op, + lld_op, ldc1_op, ldc2_op, ld_op, + sc_op, swc1_op, swc2_op, major_3b_op, + scd_op, sdc1_op, sdc2_op, sd_op +}; + +/* + * func field of spec opcode. + */ +enum spec_op { + sll_op, movc_op, srl_op, sra_op, + sllv_op, pmon_op, srlv_op, srav_op, + jr_op, jalr_op, movz_op, movn_op, + syscall_op, break_op, spim_op, sync_op, + mfhi_op, mthi_op, mflo_op, mtlo_op, + dsllv_op, spec2_unused_op, dsrlv_op, dsrav_op, + mult_op, multu_op, div_op, divu_op, + dmult_op, dmultu_op, ddiv_op, ddivu_op, + add_op, addu_op, sub_op, subu_op, + and_op, or_op, xor_op, nor_op, + spec3_unused_op, spec4_unused_op, slt_op, sltu_op, + dadd_op, daddu_op, dsub_op, dsubu_op, + tge_op, tgeu_op, tlt_op, tltu_op, + teq_op, spec5_unused_op, tne_op, spec6_unused_op, + dsll_op, spec7_unused_op, dsrl_op, dsra_op, + dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op +}; + +/* + * func field of spec2 opcode. + */ +enum spec2_op { + madd_op, maddu_op, mul_op, spec2_3_unused_op, + msub_op, msubu_op, /* more unused ops */ + clz_op = 0x20, clo_op, + dclz_op = 0x24, dclo_op, + sdbpp_op = 0x3f +}; + +/* + * func field of spec3 opcode. + */ +enum spec3_op { + ext_op, dextm_op, dextu_op, dext_op, + ins_op, dinsm_op, dinsu_op, dins_op, + bshfl_op = 0x20, + dbshfl_op = 0x24, + rdhwr_op = 0x3b +}; + +/* + * rt field of bcond opcodes. + */ +enum rt_op { + bltz_op, bgez_op, bltzl_op, bgezl_op, + spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07, + tgei_op, tgeiu_op, tlti_op, tltiu_op, + teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op, + bltzal_op, bgezal_op, bltzall_op, bgezall_op, + rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17, + rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b, + bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f +}; + +/* + * rs field of cop opcodes. + */ +enum cop_op { + mfc_op = 0x00, dmfc_op = 0x01, + cfc_op = 0x02, mtc_op = 0x04, + dmtc_op = 0x05, ctc_op = 0x06, + bc_op = 0x08, cop_op = 0x10, + copm_op = 0x18 +}; + +/* + * rt field of cop.bc_op opcodes + */ +enum bcop_op { + bcf_op, bct_op, bcfl_op, bctl_op +}; + +/* + * func field of cop0 coi opcodes. + */ +enum cop0_coi_func { + tlbr_op = 0x01, tlbwi_op = 0x02, + tlbwr_op = 0x06, tlbp_op = 0x08, + rfe_op = 0x10, eret_op = 0x18 +}; + +/* + * func field of cop0 com opcodes. + */ +enum cop0_com_func { + tlbr1_op = 0x01, tlbw_op = 0x02, + tlbp1_op = 0x08, dctr_op = 0x09, + dctw_op = 0x0a +}; + +/* + * fmt field of cop1 opcodes. + */ +enum cop1_fmt { + s_fmt, d_fmt, e_fmt, q_fmt, + w_fmt, l_fmt +}; + +/* + * func field of cop1 instructions using d, s or w format. 
+ */ +enum cop1_sdw_func { + fadd_op = 0x00, fsub_op = 0x01, + fmul_op = 0x02, fdiv_op = 0x03, + fsqrt_op = 0x04, fabs_op = 0x05, + fmov_op = 0x06, fneg_op = 0x07, + froundl_op = 0x08, ftruncl_op = 0x09, + fceill_op = 0x0a, ffloorl_op = 0x0b, + fround_op = 0x0c, ftrunc_op = 0x0d, + fceil_op = 0x0e, ffloor_op = 0x0f, + fmovc_op = 0x11, fmovz_op = 0x12, + fmovn_op = 0x13, frecip_op = 0x15, + frsqrt_op = 0x16, fcvts_op = 0x20, + fcvtd_op = 0x21, fcvte_op = 0x22, + fcvtw_op = 0x24, fcvtl_op = 0x25, + fcmp_op = 0x30 +}; + +/* + * func field of cop1x opcodes (MIPS IV). + */ +enum cop1x_func { + lwxc1_op = 0x00, ldxc1_op = 0x01, + pfetch_op = 0x07, swxc1_op = 0x08, + sdxc1_op = 0x09, madd_s_op = 0x20, + madd_d_op = 0x21, madd_e_op = 0x22, + msub_s_op = 0x28, msub_d_op = 0x29, + msub_e_op = 0x2a, nmadd_s_op = 0x30, + nmadd_d_op = 0x31, nmadd_e_op = 0x32, + nmsub_s_op = 0x38, nmsub_d_op = 0x39, + nmsub_e_op = 0x3a +}; + +/* + * func field for mad opcodes (MIPS IV). + */ +enum mad_func { + madd_fp_op = 0x08, msub_fp_op = 0x0a, + nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e +}; + +/* + * Damn ... bitfields depend from byteorder :-( + */ +#ifdef __MIPSEB__ +struct j_format { /* Jump format */ + unsigned int opcode : 6; + unsigned int target : 26; +}; + +struct i_format { /* Immediate format (addi, lw, ...) */ + unsigned int opcode : 6; + unsigned int rs : 5; + unsigned int rt : 5; + signed int simmediate : 16; +}; + +struct u_format { /* Unsigned immediate format (ori, xori, ...) */ + unsigned int opcode : 6; + unsigned int rs : 5; + unsigned int rt : 5; + unsigned int uimmediate : 16; +}; + +struct c_format { /* Cache (>= R6000) format */ + unsigned int opcode : 6; + unsigned int rs : 5; + unsigned int c_op : 3; + unsigned int cache : 2; + unsigned int simmediate : 16; +}; + +struct r_format { /* Register format */ + unsigned int opcode : 6; + unsigned int rs : 5; + unsigned int rt : 5; + unsigned int rd : 5; + unsigned int re : 5; + unsigned int func : 6; +}; + +struct p_format { /* Performance counter format (R10000) */ + unsigned int opcode : 6; + unsigned int rs : 5; + unsigned int rt : 5; + unsigned int rd : 5; + unsigned int re : 5; + unsigned int func : 6; +}; + +struct f_format { /* FPU register format */ + unsigned int opcode : 6; + unsigned int : 1; + unsigned int fmt : 4; + unsigned int rt : 5; + unsigned int rd : 5; + unsigned int re : 5; + unsigned int func : 6; +}; + +struct ma_format { /* FPU multipy and add format (MIPS IV) */ + unsigned int opcode : 6; + unsigned int fr : 5; + unsigned int ft : 5; + unsigned int fs : 5; + unsigned int fd : 5; + unsigned int func : 4; + unsigned int fmt : 2; +}; + +#elif defined(__MIPSEL__) + +struct j_format { /* Jump format */ + unsigned int target : 26; + unsigned int opcode : 6; +}; + +struct i_format { /* Immediate format */ + signed int simmediate : 16; + unsigned int rt : 5; + unsigned int rs : 5; + unsigned int opcode : 6; +}; + +struct u_format { /* Unsigned immediate format */ + unsigned int uimmediate : 16; + unsigned int rt : 5; + unsigned int rs : 5; + unsigned int opcode : 6; +}; + +struct c_format { /* Cache (>= R6000) format */ + unsigned int simmediate : 16; + unsigned int cache : 2; + unsigned int c_op : 3; + unsigned int rs : 5; + unsigned int opcode : 6; +}; + +struct r_format { /* Register format */ + unsigned int func : 6; + unsigned int re : 5; + unsigned int rd : 5; + unsigned int rt : 5; + unsigned int rs : 5; + unsigned int opcode : 6; +}; + +struct p_format { /* Performance counter format (R10000) */ + unsigned int func : 6; + 
unsigned int re : 5; + unsigned int rd : 5; + unsigned int rt : 5; + unsigned int rs : 5; + unsigned int opcode : 6; +}; + +struct f_format { /* FPU register format */ + unsigned int func : 6; + unsigned int re : 5; + unsigned int rd : 5; + unsigned int rt : 5; + unsigned int fmt : 4; + unsigned int : 1; + unsigned int opcode : 6; +}; + +struct ma_format { /* FPU multipy and add format (MIPS IV) */ + unsigned int fmt : 2; + unsigned int func : 4; + unsigned int fd : 5; + unsigned int fs : 5; + unsigned int ft : 5; + unsigned int fr : 5; + unsigned int opcode : 6; +}; + +#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */ +#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?" +#endif + +union mips_instruction { + unsigned int word; + unsigned short halfword[2]; + unsigned char byte[4]; + struct j_format j_format; + struct i_format i_format; + struct u_format u_format; + struct c_format c_format; + struct r_format r_format; + struct f_format f_format; + struct ma_format ma_format; +}; + +/* HACHACHAHCAHC ... */ + +/* In case some other massaging is needed, keep MIPSInst as wrapper */ + +#define MIPSInst(x) x + +#define I_OPCODE_SFT 26 +#define MIPSInst_OPCODE(x) (MIPSInst(x) >> I_OPCODE_SFT) + +#define I_JTARGET_SFT 0 +#define MIPSInst_JTARGET(x) (MIPSInst(x) & 0x03ffffff) + +#define I_RS_SFT 21 +#define MIPSInst_RS(x) ((MIPSInst(x) & 0x03e00000) >> I_RS_SFT) + +#define I_RT_SFT 16 +#define MIPSInst_RT(x) ((MIPSInst(x) & 0x001f0000) >> I_RT_SFT) + +#define I_IMM_SFT 0 +#define MIPSInst_SIMM(x) ((int)((short)(MIPSInst(x) & 0xffff))) +#define MIPSInst_UIMM(x) (MIPSInst(x) & 0xffff) + +#define I_CACHEOP_SFT 18 +#define MIPSInst_CACHEOP(x) ((MIPSInst(x) & 0x001c0000) >> I_CACHEOP_SFT) + +#define I_CACHESEL_SFT 16 +#define MIPSInst_CACHESEL(x) ((MIPSInst(x) & 0x00030000) >> I_CACHESEL_SFT) + +#define I_RD_SFT 11 +#define MIPSInst_RD(x) ((MIPSInst(x) & 0x0000f800) >> I_RD_SFT) + +#define I_RE_SFT 6 +#define MIPSInst_RE(x) ((MIPSInst(x) & 0x000007c0) >> I_RE_SFT) + +#define I_FUNC_SFT 0 +#define MIPSInst_FUNC(x) (MIPSInst(x) & 0x0000003f) + +#define I_FFMT_SFT 21 +#define MIPSInst_FFMT(x) ((MIPSInst(x) & 0x01e00000) >> I_FFMT_SFT) + +#define I_FT_SFT 16 +#define MIPSInst_FT(x) ((MIPSInst(x) & 0x001f0000) >> I_FT_SFT) + +#define I_FS_SFT 11 +#define MIPSInst_FS(x) ((MIPSInst(x) & 0x0000f800) >> I_FS_SFT) + +#define I_FD_SFT 6 +#define MIPSInst_FD(x) ((MIPSInst(x) & 0x000007c0) >> I_FD_SFT) + +#define I_FR_SFT 21 +#define MIPSInst_FR(x) ((MIPSInst(x) & 0x03e00000) >> I_FR_SFT) + +#define I_FMA_FUNC_SFT 2 +#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x0000003c) >> I_FMA_FUNC_SFT) + +#define I_FMA_FFMT_SFT 0 +#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000003) + +typedef unsigned int mips_instruction; + +#endif /* _ASM_INST_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/io.h b/target/linux/realtek/files/arch/rlx/include/asm/io.h new file mode 100644 index 000000000..0f3039847 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/io.h @@ -0,0 +1,530 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 1995 Waldorf GmbH + * Copyright (C) 1994 - 2000, 06 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. + * Author: Maciej W. 
Rozycki <macro@mips.com> + */ +#ifndef _ASM_IO_H +#define _ASM_IO_H + +#include <linux/compiler.h> +#include <linux/kernel.h> +#include <linux/types.h> + +#include <asm/addrspace.h> +#include <asm/byteorder.h> +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm-generic/iomap.h> +#include <asm/page.h> +#include <asm/pgtable-bits.h> +#include <asm/processor.h> +#include <asm/string.h> + +#include <ioremap.h> +#include <mangle-port.h> + +/* + * Slowdown I/O port space accesses for antique hardware. + */ +#undef CONF_SLOWDOWN_IO + +/* + * Raw operations are never swapped in software. OTOH values that raw + * operations are working on may or may not have been swapped by the bus + * hardware. An example use would be for flash memory that's used for + * execute in place. + */ +# define __raw_ioswabb(a, x) (x) +# define __raw_ioswabw(a, x) (x) +# define __raw_ioswabl(a, x) (x) +# define __raw_ioswabq(a, x) (x) +# define ____raw_ioswabq(a, x) (x) + +/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */ + +#define IO_SPACE_LIMIT 0xffff + +/* + * On MIPS I/O ports are memory mapped, so we access them using normal + * load/store instructions. mips_io_port_base is the virtual address to + * which all ports are being mapped. For sake of efficiency some code + * assumes that this is an address that can be loaded with a single lui + * instruction, so the lower 16 bits must be zero. Should be true on + * on any sane architecture; generic code does not use this assumption. + */ +extern const unsigned long mips_io_port_base; + +/* + * Gcc will generate code to load the value of mips_io_port_base after each + * function call which may be fairly wasteful in some cases. So we don't + * play quite by the book. We tell gcc mips_io_port_base is a long variable + * which solves the code generation issue. Now we need to violate the + * aliasing rules a little to make initialization possible and finally we + * will need the barrier() to fight side effects of the aliasing chat. + * This trickery will eventually collapse under gcc's optimizer. Oh well. + */ +static inline void set_io_port_base(unsigned long base) +{ + * (unsigned long *) &mips_io_port_base = base; + barrier(); +} + +/* + * Thanks to James van Artsdalen for a better timing-fix than + * the two short jumps: using outb's to a nonexistent port seems + * to guarantee better timings even on fast machines. + * + * On the other hand, I'd like to be sure of a non-existent port: + * I feel a bit unsafe about using 0x80 (should be safe, though) + * + * Linus + * + */ + +#define __SLOW_DOWN_IO \ + __asm__ __volatile__( \ + "sb\t$0,0x80(%0)" \ + : : "r" (mips_io_port_base)); + +#ifdef CONF_SLOWDOWN_IO +#ifdef REALLY_SLOW_IO +#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; } +#else +#define SLOW_DOWN_IO __SLOW_DOWN_IO +#endif +#else +#define SLOW_DOWN_IO +#endif + +/* + * virt_to_phys - map virtual addresses to physical + * @address: address to remap + * + * The returned physical address is the physical (CPU) mapping for + * the memory address given. It is only valid to use this function on + * addresses directly mapped or allocated via kmalloc. + * + * This function does not give bus mappings for DMA transfers. 
In + * almost all conceivable cases a device driver should not be using + * this function + */ +static inline unsigned long virt_to_phys(volatile const void *address) +{ + return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET; +} + +/* + * phys_to_virt - map physical address to virtual + * @address: address to remap + * + * The returned virtual address is a current CPU mapping for + * the memory address given. It is only valid to use this function on + * addresses that have a kernel mapping + * + * This function does not handle bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ +static inline void * phys_to_virt(unsigned long address) +{ + return (void *)(address + PAGE_OFFSET - PHYS_OFFSET); +} + +/* + * ISA I/O bus memory addresses are 1:1 with the physical address. + */ +static inline unsigned long isa_virt_to_bus(volatile void * address) +{ + return (unsigned long)address - PAGE_OFFSET; +} + +static inline void * isa_bus_to_virt(unsigned long address) +{ + return (void *)(address + PAGE_OFFSET); +} + +#define isa_page_to_bus page_to_phys + +/* + * However PCI ones are not necessarily 1:1 and therefore these interfaces + * are forbidden in portable PCI drivers. + * + * Allow them for x86 for legacy drivers, though. + */ +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +/* + * Change "struct page" to physical address. + */ +#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) + +extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags); +extern void __iounmap(const volatile void __iomem *addr); + +static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, + unsigned long flags) +{ + void __iomem *addr = plat_ioremap(offset, size, flags); + + if (addr) + return addr; + +#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL)) + + if (__builtin_constant_p(offset) && + __builtin_constant_p(size) && __builtin_constant_p(flags)) { + phys_t phys_addr, last_addr; + + phys_addr = fixup_bigphys_addr(offset, size); + + /* Don't allow wraparound or zero size. */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) + return NULL; + + /* + * Map uncached objects in the low 512MB of address + * space using KSEG1. + */ + if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) && + flags == _CACHE_UNCACHED) + return (void __iomem *) + (unsigned long)CKSEG1ADDR(phys_addr); + } + + return __ioremap(offset, size, flags); + +#undef __IS_LOW512 +} + +/* + * ioremap - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + */ +#define ioremap(offset, size) \ + __ioremap_mode((offset), (size), _CACHE_UNCACHED) + +/* + * ioremap_nocache - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap_nocache performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. 
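+ * On this port ioremap() and ioremap_nocache() both expand to __ioremap_mode() with _CACHE_UNCACHED, so the two produce identical mappings.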
+ * + * This version of ioremap ensures that the memory is marked uncachable + * on the CPU as well as honouring existing caching rules from things like + * the PCI bus. Note that there are other caches and buffers on many + * busses. In paticular driver authors should read up on PCI writes + * + * It's useful if some control registers are in such an area and + * write combining or read caching is not desirable: + */ +#define ioremap_nocache(offset, size) \ + __ioremap_mode((offset), (size), _CACHE_UNCACHED) + +/* + * ioremap_cachable - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap_nocache performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + * + * This version of ioremap ensures that the memory is marked cachable by + * the CPU. Also enables full write-combining. Useful for some + * memory-like regions on I/O busses. + */ +#define ioremap_cachable(offset, size) \ + __ioremap_mode((offset), (size), _page_cachable_default) + +/* + * These two are MIPS specific ioremap variant. ioremap_cacheable_cow + * requests a cachable mapping, ioremap_uncached_accelerated requests a + * mapping using the uncached accelerated mode which isn't supported on + * all processors. + */ +#define ioremap_cacheable_cow(offset, size) \ + __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW) +#define ioremap_uncached_accelerated(offset, size) \ + __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED) + +static inline void iounmap(const volatile void __iomem *addr) +{ + if (plat_iounmap(addr)) + return; + +#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1) + + if (__builtin_constant_p(addr) && __IS_KSEG1(addr)) + return; + + __iounmap(addr); + +#undef __IS_KSEG1 +} + +#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ + \ +static inline void pfx##write##bwlq(type val, \ + volatile void __iomem *mem) \ +{ \ + volatile type *__mem; \ + type __val; \ + \ + __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ + \ + __val = pfx##ioswab##bwlq(__mem, val); \ + \ + if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ + *__mem = __val; \ + else \ + BUG(); \ +} \ + \ +static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ +{ \ + volatile type *__mem; \ + type __val; \ + \ + __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ + \ + if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ + __val = *__mem; \ + else { \ + __val = 0; \ + BUG(); \ + } \ + \ + return pfx##ioswab##bwlq(__mem, __val); \ +} + +#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \ + \ +static inline void pfx##out##bwlq##p(type val, unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ + \ + __val = pfx##ioswab##bwlq(__addr, val); \ + \ + /* Really, we want this to be atomic */ \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + *__addr = __val; \ + slow; \ +} \ + \ +static inline type pfx##in##bwlq##p(unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + __val = *__addr; \ + slow; \ + \ + return 
pfx##ioswab##bwlq(__addr, __val); \ +} + +#define __BUILD_MEMORY_PFX(bus, bwlq, type) \ + \ +__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1) + +#define BUILDIO_MEM(bwlq, type) \ + \ +__BUILD_MEMORY_PFX(__raw_, bwlq, type) \ +__BUILD_MEMORY_PFX(, bwlq, type) \ +__BUILD_MEMORY_PFX(__mem_, bwlq, type) \ + +BUILDIO_MEM(b, u8) +BUILDIO_MEM(w, u16) +BUILDIO_MEM(l, u32) +BUILDIO_MEM(q, u64) + +#define __BUILD_IOPORT_PFX(bus, bwlq, type) \ + __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \ + __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO) + +#define BUILDIO_IOPORT(bwlq, type) \ + __BUILD_IOPORT_PFX(, bwlq, type) \ + __BUILD_IOPORT_PFX(__mem_, bwlq, type) + +BUILDIO_IOPORT(b, u8) +BUILDIO_IOPORT(w, u16) +BUILDIO_IOPORT(l, u32) + +#define __BUILDIO(bwlq, type) \ + \ +__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0) + +__BUILDIO(q, u64) + +#define readb_relaxed readb +#define readw_relaxed readw +#define readl_relaxed readl +#define readq_relaxed readq + +/* + * Some code tests for these symbols + */ +#define readq readq +#define writeq writeq + +#define __BUILD_MEMORY_STRING(bwlq, type) \ + \ +static inline void writes##bwlq(volatile void __iomem *mem, \ + const void *addr, unsigned int count) \ +{ \ + const volatile type *__addr = addr; \ + \ + while (count--) { \ + __mem_write##bwlq(*__addr, mem); \ + __addr++; \ + } \ +} \ + \ +static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \ + unsigned int count) \ +{ \ + volatile type *__addr = addr; \ + \ + while (count--) { \ + *__addr = __mem_read##bwlq(mem); \ + __addr++; \ + } \ +} + +#define __BUILD_IOPORT_STRING(bwlq, type) \ + \ +static inline void outs##bwlq(unsigned long port, const void *addr, \ + unsigned int count) \ +{ \ + const volatile type *__addr = addr; \ + \ + while (count--) { \ + __mem_out##bwlq(*__addr, port); \ + __addr++; \ + } \ +} \ + \ +static inline void ins##bwlq(unsigned long port, void *addr, \ + unsigned int count) \ +{ \ + volatile type *__addr = addr; \ + \ + while (count--) { \ + *__addr = __mem_in##bwlq(port); \ + __addr++; \ + } \ +} + +#define BUILDSTRING(bwlq, type) \ + \ +__BUILD_MEMORY_STRING(bwlq, type) \ +__BUILD_IOPORT_STRING(bwlq, type) + +BUILDSTRING(b, u8) +BUILDSTRING(w, u16) +BUILDSTRING(l, u32) + +/* Depends on MIPS II instruction set */ +#ifdef CONFIG_CPU_HAS_SYNC +#define mmiowb() __asm__ __volatile__ ("sync" ::: "memory") +#else +#define mmiowb() __asm__ __volatile__ (" " ::: "memory") +//#define mmiowb() __fast_iob() +#endif + +static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) +{ + memset((void __force *) addr, val, count); +} +static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) +{ + memcpy(dst, (void __force *) src, count); +} +static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) +{ + memcpy((void __force *) dst, src, count); +} + +/* + * The caches on some architectures aren't dma-coherent and have need to + * handle this in software. There are three types of operations that + * can be applied to dma buffers. + * + * - dma_cache_wback_inv(start, size) makes caches and coherent by + * writing the content of the caches back to memory, if necessary. + * The function also invalidates the affected part of the caches as + * necessary before DMA transfers from outside to memory. + * - dma_cache_wback(start, size) makes caches and coherent by + * writing the content of the caches back to memory, if necessary. 
+ * The function also invalidates the affected part of the caches as + * necessary before DMA transfers from outside to memory. + * - dma_cache_inv(start, size) invalidates the affected parts of the + * caches. Dirty lines of the caches may be written back or simply + * be discarded. This operation is necessary before dma operations + * to the memory. + * + * This API used to be exported; it now is for arch code internal use only. + */ +extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size); +extern void (*_dma_cache_wback)(unsigned long start, unsigned long size); +extern void (*_dma_cache_inv)(unsigned long start, unsigned long size); + +#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size) +#define dma_cache_wback(start, size) _dma_cache_wback(start, size) +#define dma_cache_inv(start, size) _dma_cache_inv(start, size) + +/* + * Read a 32-bit register that requires a 64-bit read cycle on the bus. + * Avoid interrupt mucking, just adjust the address for 4-byte access. + * Assume the addresses are 8-byte aligned. + */ +#ifdef CONFIG_CPU_BIG_ENDIAN +#define __CSR_32_ADJUST 4 +#else +#define __CSR_32_ADJUST 0 +#endif + +#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v)) +#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST)) + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#endif /* _ASM_IO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/ioctl.h b/target/linux/realtek/files/arch/rlx/include/asm/ioctl.h new file mode 100644 index 000000000..c412b7fcd --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/ioctl.h @@ -0,0 +1,27 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 99, 2001 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2009 Wind River Systems + * Written by Ralf Baechle <ralf@linux-mips.org> + */ +#ifndef _ASM_IOCTL_H +#define _ASM_IOCTL_H + +#define _IOC_SIZEBITS 13 +#define _IOC_DIRBITS 3 + +/* + * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit. + * And this turns out useful to catch old ioctl numbers in header + * files for us. + */ +#define _IOC_NONE 1U +#define _IOC_READ 2U +#define _IOC_WRITE 4U + +#include <asm-generic/ioctl.h> + +#endif /* _ASM_IOCTL_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/ioctls.h b/target/linux/realtek/files/arch/rlx/include/asm/ioctls.h new file mode 100644 index 000000000..3f04a995e --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/ioctls.h @@ -0,0 +1,109 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1996, 2001 Ralf Baechle + * Copyright (C) 2001 MIPS Technologies, Inc. 
+ */ +#ifndef __ASM_IOCTLS_H +#define __ASM_IOCTLS_H + +#include <asm/ioctl.h> + +#define TCGETA 0x5401 +#define TCSETA 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ +#define TCSETAW 0x5403 +#define TCSETAF 0x5404 + +#define TCSBRK 0x5405 +#define TCXONC 0x5406 +#define TCFLSH 0x5407 + +#define TCGETS 0x540d +#define TCSETS 0x540e +#define TCSETSW 0x540f +#define TCSETSF 0x5410 + +#define TIOCEXCL 0x740d /* set exclusive use of tty */ +#define TIOCNXCL 0x740e /* reset exclusive use of tty */ +#define TIOCOUTQ 0x7472 /* output queue size */ +#define TIOCSTI 0x5472 /* simulate terminal input */ +#define TIOCMGET 0x741d /* get all modem bits */ +#define TIOCMBIS 0x741b /* bis modem bits */ +#define TIOCMBIC 0x741c /* bic modem bits */ +#define TIOCMSET 0x741a /* set all modem bits */ +#define TIOCPKT 0x5470 /* pty: set/clear packet mode */ +#define TIOCPKT_DATA 0x00 /* data packet */ +#define TIOCPKT_FLUSHREAD 0x01 /* flush packet */ +#define TIOCPKT_FLUSHWRITE 0x02 /* flush packet */ +#define TIOCPKT_STOP 0x04 /* stop output */ +#define TIOCPKT_START 0x08 /* start output */ +#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */ +#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */ +/* #define TIOCPKT_IOCTL 0x40 state change of pty driver */ +#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */ +#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */ +#define TIOCNOTTY 0x5471 /* void tty association */ +#define TIOCSETD 0x7401 +#define TIOCGETD 0x7400 + +#define FIOCLEX 0x6601 +#define FIONCLEX 0x6602 +#define FIOASYNC 0x667d +#define FIONBIO 0x667e +#define FIOQSIZE 0x667f + +#define TIOCGLTC 0x7474 /* get special local chars */ +#define TIOCSLTC 0x7475 /* set special local chars */ +#define TIOCSPGRP _IOW('t', 118, int) /* set pgrp of tty */ +#define TIOCGPGRP _IOR('t', 119, int) /* get pgrp of tty */ +#define TIOCCONS _IOW('t', 120, int) /* become virtual console */ + +#define FIONREAD 0x467f +#define TIOCINQ FIONREAD + +#define TIOCGETP 0x7408 +#define TIOCSETP 0x7409 +#define TIOCSETN 0x740a /* TIOCSETP wo flush */ + +/* #define TIOCSETA _IOW('t', 20, struct termios) set termios struct */ +/* #define TIOCSETAW _IOW('t', 21, struct termios) drain output, set */ +/* #define TIOCSETAF _IOW('t', 22, struct termios) drn out, fls in, set */ +/* #define TIOCGETD _IOR('t', 26, int) get line discipline */ +/* #define TIOCSETD _IOW('t', 27, int) set line discipline */ + /* 127-124 compat */ + +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x7416 /* Return the session ID of FD */ +#define TCGETS2 _IOR('T', 0x2A, struct termios2) +#define TCSETS2 _IOW('T', 0x2B, struct termios2) +#define TCSETSW2 _IOW('T', 0x2C, struct termios2) +#define TCSETSF2 _IOW('T', 0x2D, struct termios2) +#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ + +/* I hope the range from 0x5480 on is free ... 
*/ +#define TIOCSCTTY 0x5480 /* become controlling tty */ +#define TIOCGSOFTCAR 0x5481 +#define TIOCSSOFTCAR 0x5482 +#define TIOCLINUX 0x5483 +#define TIOCGSERIAL 0x5484 +#define TIOCSSERIAL 0x5485 +#define TCSBRKP 0x5486 /* Needed for POSIX tcsendbreak() */ +#define TIOCSERCONFIG 0x5488 +#define TIOCSERGWILD 0x5489 +#define TIOCSERSWILD 0x548a +#define TIOCGLCKTRMIOS 0x548b +#define TIOCSLCKTRMIOS 0x548c +#define TIOCSERGSTRUCT 0x548d /* For debugging only */ +#define TIOCSERGETLSR 0x548e /* Get line status register */ +#define TIOCSERGETMULTI 0x548f /* Get multiport config */ +#define TIOCSERSETMULTI 0x5490 /* Set multiport config */ +#define TIOCMIWAIT 0x5491 /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x5492 /* read serial port inline interrupt counts */ +#define TIOCGHAYESESP 0x5493 /* Get Hayes ESP configuration */ +#define TIOCSHAYESESP 0x5494 /* Set Hayes ESP configuration */ + +#endif /* __ASM_IOCTLS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/ipcbuf.h b/target/linux/realtek/files/arch/rlx/include/asm/ipcbuf.h new file mode 100644 index 000000000..d47d08f26 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/ipcbuf.h @@ -0,0 +1,28 @@ +#ifndef _ASM_IPCBUF_H +#define _ASM_IPCBUF_H + +/* + * The ipc64_perm structure for alpha architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 32-bit seq + * - 2 miscellaneous 64-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; + unsigned short __pad1; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _ASM_IPCBUF_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/irq.h b/target/linux/realtek/files/arch/rlx/include/asm/irq.h new file mode 100644 index 000000000..58d84082f --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/irq.h @@ -0,0 +1,62 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle + * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle + */ +#ifndef _ASM_IRQ_H +#define _ASM_IRQ_H + +#include <linux/linkage.h> +#include <linux/smp.h> +#include <irq.h> + +#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ + +/* + * do_IRQ handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + * + * Ideally there should be away to get this into kernel/irq/handle.c to + * avoid the overhead of a call for just a tiny function ... + */ +#define do_IRQ(irq) \ +do { \ + irq_enter(); \ + generic_handle_irq(irq); \ + irq_exit(); \ +} while (0) + + +// rock: add more detail spurious int info 2010-04-06 +enum { + SPURIOS_INT_CPU = 0, + SPURIOS_INT_LOPI, + SPURIOS_INT_CASCADE, + SPURIOS_INT_MAX +}; + +extern void arch_init_irq(void); +#if defined(CONFIG_RTK_VOIP) || defined(CONFIG_RTL_819X) +extern void spurious_interrupt(int i); +#else +extern void spurious_interrupt(void); +#endif + +extern int allocate_irqno(void); +extern void alloc_legacy_irqno(void); +extern void free_irqno(unsigned int irq); + +/* + * Before R2 the timer and performance counter interrupts were both fixed to + * IE7. 
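As a usage note for the do_IRQ() wrapper above: on MIPS it is normally invoked from the platform's plat_irq_dispatch() hook. The sketch below is generic and hedged; the CAUSE/STATUS decode and the two interrupt lines are assumptions for illustration, not the actual Realtek dispatch logic.

    #include <linux/linkage.h>
    #include <linux/irq.h>
    #include <linux/hardirq.h>
    #include <asm/mipsregs.h>

    /* illustration only: hand each pending CPU interrupt line to do_IRQ() */
    asmlinkage void example_plat_irq_dispatch(void)
    {
            unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

            if (pending & CAUSEF_IP7)               /* CP0 timer (assumed wiring) */
                    do_IRQ(MIPS_CPU_IRQ_BASE + 7);
            else if (pending & CAUSEF_IP2)          /* cascaded SoC controller (assumed wiring) */
                    do_IRQ(MIPS_CPU_IRQ_BASE + 2);
    }
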
Since R2 their number has to be read from the c0_intctl register. + */ +#define CP0_LEGACY_COMPARE_IRQ 7 + +extern int cp0_compare_irq; +extern int cp0_perfcount_irq; + +#endif /* _ASM_IRQ_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/irq_cpu.h b/target/linux/realtek/files/arch/rlx/include/asm/irq_cpu.h new file mode 100644 index 000000000..56e3d4947 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/irq_cpu.h @@ -0,0 +1,18 @@ +/* + * include/asm-mips/irq_cpu.h + * + * MIPS CPU interrupt definitions. + * + * Copyright (C) 2002 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_IRQ_CPU_H +#define _ASM_IRQ_CPU_H + +extern void rlx_cpu_irq_init(int irq_base); + +#endif /* _ASM_IRQ_CPU_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/irq_regs.h b/target/linux/realtek/files/arch/rlx/include/asm/irq_regs.h new file mode 100644 index 000000000..33bd2a06d --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/irq_regs.h @@ -0,0 +1,21 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef __ASM_IRQ_REGS_H +#define __ASM_IRQ_REGS_H + +#define ARCH_HAS_OWN_IRQ_REGS + +#include <linux/thread_info.h> + +static inline struct pt_regs *get_irq_regs(void) +{ + return current_thread_info()->regs; +} + +#endif /* __ASM_IRQ_REGS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/irq_vec.h b/target/linux/realtek/files/arch/rlx/include/asm/irq_vec.h new file mode 100644 index 000000000..c56f6590e --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/irq_vec.h @@ -0,0 +1,12 @@ +/* + * Realtek Semiconductor Corp. + * + * Tony Wu (tonywu@realtek.com.tw) + * Feb. 28, 2008 + */ +#ifndef _ASM_IRQ_VEC_H_ +#define _ASM_IRQ_VEC_H_ + +extern void rlx_vec_irq_init(int irq_base); + +#endif /* _ASM_IRQ_VEC_H_ */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/irqflags.h b/target/linux/realtek/files/arch/rlx/include/asm/irqflags.h new file mode 100644 index 000000000..35c805c2d --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/irqflags.h @@ -0,0 +1,184 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle + * Copyright (C) 1996 by Paul M. Antoine + * Copyright (C) 1999 Silicon Graphics + * Copyright (C) 2000 MIPS Technologies, Inc. 
+ */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <asm/hazards.h> + +__asm__( + " .macro raw_local_irq_enable \n" + " .set push \n" + " .set reorder \n" + " .set noat \n" + " mfc0 $1,$12 \n" + " ori $1,0x1f \n" + " xori $1,0x1e \n" + " mtc0 $1,$12 \n" + " irq_enable_hazard \n" + " .set pop \n" + " .endm"); + +static inline void raw_local_irq_enable(void) +{ + __asm__ __volatile__( + "raw_local_irq_enable" + : /* no outputs */ + : /* no inputs */ + : "memory"); +} + +/* + * For cli() we have to insert nops to make sure that the new value + * has actually arrived in the status register before the end of this + * macro. + * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs + * no nops at all. + */ +/* + * For TX49, operating only IE bit is not enough. + * + * If mfc0 $12 follows store and the mfc0 is last instruction of a + * page and fetching the next instruction causes TLB miss, the result + * of the mfc0 might wrongly contain EXL bit. + * + * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008 + * + * Workaround: mask EXL bit of the result or place a nop before mfc0. + */ +__asm__( + " .macro raw_local_irq_disable\n" + " .set push \n" + " .set noat \n" + " mfc0 $1,$12 \n" + " ori $1,0x1f \n" + " xori $1,0x1f \n" + " .set noreorder \n" + " mtc0 $1,$12 \n" + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +static inline void raw_local_irq_disable(void) +{ + __asm__ __volatile__( + "raw_local_irq_disable" + : /* no outputs */ + : /* no inputs */ + : "memory"); +} + +__asm__( + " .macro raw_local_save_flags flags \n" + " .set push \n" + " .set reorder \n" + " mfc0 \\flags, $12 \n" + " .set pop \n" + " .endm \n"); + +#define raw_local_save_flags(x) \ +__asm__ __volatile__( \ + "raw_local_save_flags %0" \ + : "=r" (x)) + +__asm__( + " .macro raw_local_irq_save result \n" + " .set push \n" + " .set reorder \n" + " .set noat \n" + " mfc0 \\result, $12 \n" + " ori $1, \\result, 0x1f \n" + " xori $1, 0x1f \n" + " .set noreorder \n" + " mtc0 $1, $12 \n" + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +#define raw_local_irq_save(x) \ +__asm__ __volatile__( \ + "raw_local_irq_save\t%0" \ + : "=r" (x) \ + : /* no inputs */ \ + : "memory") + +__asm__( + " .macro raw_local_irq_restore flags \n" + " .set push \n" + " .set noreorder \n" + " .set noat \n" + " mfc0 $1, $12 \n" + " andi \\flags, 1 \n" + " ori $1, 0x1f \n" + " xori $1, 0x1f \n" + " or \\flags, $1 \n" + " mtc0 \\flags, $12 \n" + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +static inline void raw_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + + __asm__ __volatile__( + "raw_local_irq_restore\t%0" + : "=r" (__tmp1) + : "0" (flags) + : "memory"); +} + +static inline void __raw_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + + __asm__ __volatile__( + "raw_local_irq_restore\t%0" + : "=r" (__tmp1) + : "0" (flags) + : "memory"); +} + +static inline int raw_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & 1); +} + +#endif + +/* + * Do the CPU's IRQ-state tracing from assembly code. 
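A short usage sketch for the raw IRQ-flag primitives above. Most code reaches them through the generic local_irq_save()/local_irq_restore() wrappers, which expand to these raw macros (plus tracing when CONFIG_TRACE_IRQFLAGS is enabled); the per-CPU counter below is just an assumed example of state shared with an interrupt handler.

    #include <linux/irqflags.h>

    static unsigned int example_seq;        /* assumed state shared with an IRQ handler */

    static void example_update(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* Status.IE cleared via raw_local_irq_save */
            example_seq++;                  /* short, non-sleeping critical section */
            local_irq_restore(flags);       /* previous IE value written back */
    }
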
+ */ +#ifdef CONFIG_TRACE_IRQFLAGS +/* Reload some registers clobbered by trace_hardirqs_on */ +# define TRACE_IRQS_RELOAD_REGS \ + LONG_L $7, PT_R7(sp); \ + LONG_L $6, PT_R6(sp); \ + LONG_L $5, PT_R5(sp); \ + LONG_L $4, PT_R4(sp); \ + LONG_L $2, PT_R2(sp) +# define TRACE_IRQS_ON \ + CLI; /* make sure trace_hardirqs_on() is called in kernel level */ \ + jal trace_hardirqs_on +# define TRACE_IRQS_ON_RELOAD \ + TRACE_IRQS_ON; \ + TRACE_IRQS_RELOAD_REGS +# define TRACE_IRQS_OFF \ + jal trace_hardirqs_off +#else +# define TRACE_IRQS_ON +# define TRACE_IRQS_ON_RELOAD +# define TRACE_IRQS_OFF +#endif + +#endif /* _ASM_IRQFLAGS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/isadep.h b/target/linux/realtek/files/arch/rlx/include/asm/isadep.h new file mode 100644 index 000000000..1a87422dc --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/isadep.h @@ -0,0 +1,19 @@ +/* + * Various ISA level dependent constants. + * Most of the following constants reflect the different layout + * of Coprocessor 0 registers. + * + * Copyright (c) 1998 Harald Koerfgen + */ + +#ifndef __ASM_ISADEP_H +#define __ASM_ISADEP_H + +/* + * kernel or user mode? (CP0_STATUS) + */ +#define KU_MASK 0x08 +#define KU_USER 0x08 +#define KU_KERN 0x00 + +#endif /* __ASM_ISADEP_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/kdebug.h b/target/linux/realtek/files/arch/rlx/include/asm/kdebug.h new file mode 100644 index 000000000..5bf62aafc --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/kdebug.h @@ -0,0 +1,13 @@ +#ifndef _ASM_MIPS_KDEBUG_H +#define _ASM_MIPS_KDEBUG_H + +#include <linux/notifier.h> + +enum die_val { + DIE_OOPS = 1, + DIE_FP, + DIE_TRAP, + DIE_RI, +}; + +#endif /* _ASM_MIPS_KDEBUG_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/kexec.h b/target/linux/realtek/files/arch/rlx/include/asm/kexec.h new file mode 100644 index 000000000..4314892aa --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/kexec.h @@ -0,0 +1,30 @@ +/* + * kexec.h for kexec + * Created by <nschichan@corp.free.fr> on Thu Oct 12 14:59:34 2006 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
+ */ + +#ifndef _MIPS_KEXEC +# define _MIPS_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000) + /* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000) + +#define KEXEC_CONTROL_PAGE_SIZE 4096 + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_MIPS + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + /* Dummy implementation for now */ +} + +#endif /* !_MIPS_KEXEC */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/kgdb.h b/target/linux/realtek/files/arch/rlx/include/asm/kgdb.h new file mode 100644 index 000000000..48223b093 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/kgdb.h @@ -0,0 +1,44 @@ +#ifndef __ASM_KGDB_H_ +#define __ASM_KGDB_H_ + +#ifdef __KERNEL__ + +#include <asm/sgidefs.h> + +#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) || \ + (_MIPS_ISA == _MIPS_ISA_MIPS32) + +#define KGDB_GDB_REG_SIZE 32 + +#elif (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \ + (_MIPS_ISA == _MIPS_ISA_MIPS64) + +#ifdef CONFIG_32BIT +#define KGDB_GDB_REG_SIZE 32 +#else /* CONFIG_CPU_32BIT */ +#define KGDB_GDB_REG_SIZE 64 +#endif +#else +#error "Need to set KGDB_GDB_REG_SIZE for MIPS ISA" +#endif /* _MIPS_ISA */ + +#define BUFMAX 2048 +#if (KGDB_GDB_REG_SIZE == 32) +#define NUMREGBYTES (90*sizeof(u32)) +#define NUMCRITREGBYTES (12*sizeof(u32)) +#else +#define NUMREGBYTES (90*sizeof(u64)) +#define NUMCRITREGBYTES (12*sizeof(u64)) +#endif +#define BREAK_INSTR_SIZE 4 +#define CACHE_FLUSH_IS_SAFE 0 + +extern void arch_kgdb_breakpoint(void); +extern int kgdb_early_setup; +extern void *saved_vectors[32]; +extern void handle_exception(struct pt_regs *regs); +extern void breakinst(void); + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KGDB_H_ */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/kmap_types.h b/target/linux/realtek/files/arch/rlx/include/asm/kmap_types.h new file mode 100644 index 000000000..9ee138d41 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/kmap_types.h @@ -0,0 +1,14 @@ +#ifndef _ASM_KMAP_TYPES_H +#define _ASM_KMAP_TYPES_H + + + +#ifdef CONFIG_DEBUG_HIGHMEM +#define __WITH_KM_FENCE +#endif + +#include <asm-generic/kmap_types.h> + +#undef __WITH_KM_FENCE + +#endif diff --git a/target/linux/realtek/files/arch/rlx/include/asm/kspd.h b/target/linux/realtek/files/arch/rlx/include/asm/kspd.h new file mode 100644 index 000000000..4e9e724c8 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/kspd.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + */ + +#ifndef _ASM_KSPD_H +#define _ASM_KSPD_H + +struct kspd_notifications { + void (*kspd_sp_exit)(int sp_id); + + struct list_head list; +}; + +#ifdef CONFIG_MIPS_APSP_KSPD +extern void kspd_notify(struct kspd_notifications *notify); +#else +static inline void kspd_notify(struct kspd_notifications *notify) +{ +} +#endif + +#endif diff --git a/target/linux/realtek/files/arch/rlx/include/asm/linkage.h b/target/linux/realtek/files/arch/rlx/include/asm/linkage.h new file mode 100644 index 000000000..e9a940d1b --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/linkage.h @@ -0,0 +1,10 @@ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#ifdef __ASSEMBLY__ +#include <asm/asm.h> +#endif + +#define __weak __attribute__((weak)) + +#endif diff --git a/target/linux/realtek/files/arch/rlx/include/asm/local.h b/target/linux/realtek/files/arch/rlx/include/asm/local.h new file mode 100644 index 000000000..889cc4a15 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/local.h @@ -0,0 +1,198 @@ +#ifndef _ARCH_MIPS_LOCAL_H +#define _ARCH_MIPS_LOCAL_H + +#include <linux/percpu.h> +#include <linux/bitops.h> +#include <asm/atomic.h> +#include <asm/cmpxchg.h> + +typedef struct +{ + atomic_long_t a; +} local_t; + +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local_read(l) atomic_long_read(&(l)->a) +#define local_set(l, i) atomic_long_set(&(l)->a, (i)) + +#define local_add(i, l) atomic_long_add((i), (&(l)->a)) +#define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) +#define local_inc(l) atomic_long_inc(&(l)->a) +#define local_dec(l) atomic_long_dec(&(l)->a) + +/* + * Same as above, but return the result value + */ +static __inline__ long local_add_return(long i, local_t * l) +{ + unsigned long result; + +#ifdef CONFIG_CPU_HAS_LLSC + unsigned long temp; + + __asm__ __volatile__( + " .set mips3 \n" + "1: ll %1, %2 # local_add_return \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " addu %0, %1, %3 \n" + " sc %0, %2 \n" + " beqz %0, 1b \n" + " addu %0, %1, %3 \n" + " .set mips0 \n" + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) + : "Ir" (i), "m" (l->a.counter) + : "memory"); +#else + unsigned long flags; + + local_irq_save(flags); + result = l->a.counter; + result += i; + l->a.counter = result; + local_irq_restore(flags); +#endif + + return result; +} + +static __inline__ long local_sub_return(long i, local_t * l) +{ + unsigned long result; + +#ifdef CONFIG_CPU_HAS_LLSC + unsigned long temp; + + __asm__ __volatile__( + " .set mips3 \n" + "1: ll %1, %2 # local_sub_return \n" +#if defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) || defined(CONFIG_CPU_RLX5281) + " nop \n" +#endif + " subu %0, %1, %3 \n" + " sc %0, %2 \n" + " beqz %0, 1b \n" + " subu %0, %1, %3 \n" + " .set mips0 \n" + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) + : "Ir" (i), "m" (l->a.counter) + : "memory"); +#else + unsigned long flags; + + local_irq_save(flags); + result = l->a.counter; + result -= i; + l->a.counter = result; + local_irq_restore(flags); +#endif + + return result; +} + +#define local_cmpxchg(l, o, n) \ + ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) +#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) + +/** + * local_add_unless - add unless the number is a given value + * @l: pointer of type local_t + * @a: the amount to add to l... + * @u: ...unless l is equal to u. + * + * Atomically adds @a to @l, so long as it was not @u. 
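A small counter sketch using the primitives defined above; local_t is meant for per-CPU statistics that only need to be atomic with respect to interrupts on the owning CPU, which is why the LL/SC loop (or the IRQ-disable fallback) suffices. The threshold and the message are arbitrary.

    #include <linux/kernel.h>
    #include <asm/local.h>

    static local_t example_rx_packets = LOCAL_INIT(0);

    static void example_count_packet(void)
    {
            long total;

            local_inc(&example_rx_packets);         /* atomic on this CPU */
            total = local_read(&example_rx_packets);

            /* reset at a threshold; local_cmpxchg() ensures only one path wins */
            if (total >= 1000 &&
                local_cmpxchg(&example_rx_packets, total, 0) == total)
                    printk(KERN_INFO "example: wrapped at %ld packets\n", total);
    }
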
+ * Returns non-zero if @l was not @u, and zero otherwise. + */ +#define local_add_unless(l, a, u) \ +({ \ + long c, old; \ + c = local_read(l); \ + while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define local_inc_not_zero(l) local_add_unless((l), 1, 0) + +#define local_dec_return(l) local_sub_return(1, (l)) +#define local_inc_return(l) local_add_return(1, (l)) + +/* + * local_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @l: pointer of type local_t + * + * Atomically subtracts @i from @l and returns + * true if the result is zero, or false for all + * other cases. + */ +#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0) + +/* + * local_inc_and_test - increment and test + * @l: pointer of type local_t + * + * Atomically increments @l by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define local_inc_and_test(l) (local_inc_return(l) == 0) + +/* + * local_dec_and_test - decrement by 1 and test + * @l: pointer of type local_t + * + * Atomically decrements @l by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) + +/* + * local_add_negative - add and test if negative + * @l: pointer of type local_t + * @i: integer value to add + * + * Atomically adds @i to @l and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +#define local_add_negative(i, l) (local_add_return(i, (l)) < 0) + +/* Use these for per-cpu local_t variables: on some archs they are + * much more efficient than these naive implementations. Note they take + * a variable, not an address. + */ + +#define __local_inc(l) ((l)->a.counter++) +#define __local_dec(l) ((l)->a.counter++) +#define __local_add(i, l) ((l)->a.counter+=(i)) +#define __local_sub(i, l) ((l)->a.counter-=(i)) + +/* Need to disable preemption for the cpu local counters otherwise we could + still access a variable of a previous CPU in a non atomic way. */ +#define cpu_local_wrap_v(l) \ + ({ local_t res__; \ + preempt_disable(); \ + res__ = (l); \ + preempt_enable(); \ + res__; }) +#define cpu_local_wrap(l) \ + ({ preempt_disable(); \ + l; \ + preempt_enable(); }) \ + +#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) +#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) +#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) +#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) +#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) +#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) + +#define __cpu_local_inc(l) cpu_local_inc(l) +#define __cpu_local_dec(l) cpu_local_dec(l) +#define __cpu_local_add(i, l) cpu_local_add((i), (l)) +#define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) + +#endif /* _ARCH_MIPS_LOCAL_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/cpu-feature-overrides.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/cpu-feature-overrides.h new file mode 100644 index 000000000..7c185bb06 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/cpu-feature-overrides.h @@ -0,0 +1,13 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Ralf Baechle + */ +#ifndef __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H + +/* Intentionally empty file ... */ + +#endif /* __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/dma-coherence.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/dma-coherence.h new file mode 100644 index 000000000..36c611b6c --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/dma-coherence.h @@ -0,0 +1,69 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> + * + */ +#ifndef __ASM_MACH_GENERIC_DMA_COHERENCE_H +#define __ASM_MACH_GENERIC_DMA_COHERENCE_H + +struct device; + +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, + size_t size) +{ + return virt_to_phys(addr); +} + +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, + struct page *page) +{ + return page_to_phys(page); +} + +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +{ + return dma_addr; +} + +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +{ +} + +static inline int plat_dma_supported(struct device *dev, u64 mask) +{ + /* + * we fall back to GFP_DMA when the mask isn't all 1s, + * so we can't guarantee allocations that must be + * within a tighter range than GFP_DMA.. + */ + if (mask < DMA_BIT_MASK(24)) + return 0; + + return 1; +} + +static inline void plat_extra_sync_for_device(struct device *dev) +{ + return; +} + +static inline int plat_dma_mapping_error(struct device *dev, + dma_addr_t dma_addr) +{ + return 0; +} + +static inline int plat_device_is_coherent(struct device *dev) +{ +#ifdef CONFIG_DMA_COHERENT + return 1; +#endif +#ifdef CONFIG_DMA_NONCOHERENT + return 0; +#endif +} + +#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/floppy.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/floppy.h new file mode 100644 index 000000000..001a8ce17 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/floppy.h @@ -0,0 +1,139 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1997, 1998, 2003 by Ralf Baechle + */ +#ifndef __ASM_MACH_GENERIC_FLOPPY_H +#define __ASM_MACH_GENERIC_FLOPPY_H + +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/sched.h> +#include <linux/linkage.h> +#include <linux/types.h> +#include <linux/mm.h> + +#include <asm/bootinfo.h> +#include <asm/cachectl.h> +#include <asm/dma.h> +#include <asm/floppy.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/pgtable.h> + +/* + * How to access the FDC's registers. + */ +static inline unsigned char fd_inb(unsigned int port) +{ + return inb_p(port); +} + +static inline void fd_outb(unsigned char value, unsigned int port) +{ + outb_p(value, port); +} + +/* + * How to access the floppy DMA functions. 
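To show how the mach-generic/dma-coherence.h hooks above are consumed, here is a simplified, non-authoritative outline in the style of the arch DMA-mapping code; the real routine also selects the cache operation by transfer direction, so treat this purely as a sketch.

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <asm/io.h>
    #include <dma-coherence.h>      /* resolved through the mach include path */

    /* outline only: map a kmalloc()ed buffer for streaming DMA to a device */
    static dma_addr_t example_map_single(struct device *dev, void *ptr, size_t size)
    {
            if (!plat_device_is_coherent(dev))
                    dma_cache_wback_inv((unsigned long)ptr, size);  /* from asm/io.h above */

            return plat_map_dma_mem(dev, ptr, size);        /* virt_to_phys() in the generic version */
    }
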
+ */ +static inline void fd_enable_dma(void) +{ + enable_dma(FLOPPY_DMA); +} + +static inline void fd_disable_dma(void) +{ + disable_dma(FLOPPY_DMA); +} + +static inline int fd_request_dma(void) +{ + return request_dma(FLOPPY_DMA, "floppy"); +} + +static inline void fd_free_dma(void) +{ + free_dma(FLOPPY_DMA); +} + +static inline void fd_clear_dma_ff(void) +{ + clear_dma_ff(FLOPPY_DMA); +} + +static inline void fd_set_dma_mode(char mode) +{ + set_dma_mode(FLOPPY_DMA, mode); +} + +static inline void fd_set_dma_addr(char *addr) +{ + set_dma_addr(FLOPPY_DMA, (unsigned long) addr); +} + +static inline void fd_set_dma_count(unsigned int count) +{ + set_dma_count(FLOPPY_DMA, count); +} + +static inline int fd_get_dma_residue(void) +{ + return get_dma_residue(FLOPPY_DMA); +} + +static inline void fd_enable_irq(void) +{ + enable_irq(FLOPPY_IRQ); +} + +static inline void fd_disable_irq(void) +{ + disable_irq(FLOPPY_IRQ); +} + +static inline int fd_request_irq(void) +{ + return request_irq(FLOPPY_IRQ, floppy_interrupt, + IRQF_DISABLED, "floppy", NULL); +} + +static inline void fd_free_irq(void) +{ + free_irq(FLOPPY_IRQ, NULL); +} + +#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); + + +static inline unsigned long fd_getfdaddr1(void) +{ + return 0x3f0; +} + +static inline unsigned long fd_dma_mem_alloc(unsigned long size) +{ + unsigned long mem; + + mem = __get_dma_pages(GFP_KERNEL, get_order(size)); + + return mem; +} + +static inline void fd_dma_mem_free(unsigned long addr, unsigned long size) +{ + free_pages(addr, get_order(size)); +} + +static inline unsigned long fd_drive_type(unsigned long n) +{ + if (n == 0) + return 4; /* 3,5", 1.44mb */ + + return 0; +} + +#endif /* __ASM_MACH_GENERIC_FLOPPY_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/gpio.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/gpio.h new file mode 100644 index 000000000..b4e70208d --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/gpio.h @@ -0,0 +1,21 @@ +#ifndef __ASM_MACH_GENERIC_GPIO_H +#define __ASM_MACH_GENERIC_GPIO_H + +#ifdef CONFIG_GPIOLIB +#define gpio_get_value __gpio_get_value +#define gpio_set_value __gpio_set_value +#define gpio_cansleep __gpio_cansleep +#else +int gpio_request(unsigned gpio, const char *label); +void gpio_free(unsigned gpio); +int gpio_direction_input(unsigned gpio); +int gpio_direction_output(unsigned gpio, int value); +int gpio_get_value(unsigned gpio); +void gpio_set_value(unsigned gpio, int value); +#endif +int gpio_to_irq(unsigned gpio); +int irq_to_gpio(unsigned irq); + +#include <asm-generic/gpio.h> /* cansleep wrappers */ + +#endif /* __ASM_MACH_GENERIC_GPIO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/ide.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/ide.h new file mode 100644 index 000000000..9c93a5b36 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/ide.h @@ -0,0 +1,138 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994-1996 Linus Torvalds & authors + * + * Copied from i386; many of the especially older MIPS or ISA-based platforms + * are basically identical. Using this file probably implies i8259 PIC + * support in a system but the very least interrupt numbers 0 - 15 need to + * be put aside for legacy devices. 
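A usage sketch for the GPIO interface declared in mach-generic/gpio.h above, valid for both the gpiolib-backed macros and the plain prototypes; the pin number and label are arbitrary placeholders.

    #include <linux/gpio.h>

    #define EXAMPLE_LED_GPIO    5   /* placeholder pin number */

    static int example_led_on(void)
    {
            int err;

            err = gpio_request(EXAMPLE_LED_GPIO, "example-led");
            if (err)
                    return err;

            err = gpio_direction_output(EXAMPLE_LED_GPIO, 1);   /* drive the line high */
            if (err)
                    gpio_free(EXAMPLE_LED_GPIO);

            return err;
    }

    static void example_led_off(void)
    {
            gpio_set_value(EXAMPLE_LED_GPIO, 0);
            gpio_free(EXAMPLE_LED_GPIO);
    }
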
+ */ +#ifndef __ASM_MACH_GENERIC_IDE_H +#define __ASM_MACH_GENERIC_IDE_H + +#ifdef __KERNEL__ + +#include <linux/pci.h> +#include <linux/stddef.h> +#include <asm/processor.h> + +/* MIPS port and memory-mapped I/O string operations. */ +static inline void __ide_flush_prologue(void) +{ +#ifdef CONFIG_SMP + if (cpu_has_dc_aliases) + preempt_disable(); +#endif +} + +static inline void __ide_flush_epilogue(void) +{ +#ifdef CONFIG_SMP + if (cpu_has_dc_aliases) + preempt_enable(); +#endif +} + +static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size) +{ + if (cpu_has_dc_aliases) { + unsigned long end = addr + size; + + while (addr < end) { + local_flush_data_cache_page((void *)addr); + addr += PAGE_SIZE; + } + } +} + +/* + * insw() and gang might be called with interrupts disabled, so we can't + * send IPIs for flushing due to the potencial of deadlocks, see the comment + * above smp_call_function() in arch/mips/kernel/smp.c. We work around the + * problem by disabling preemption so we know we actually perform the flush + * on the processor that actually has the lines to be flushed which hopefully + * is even better for performance anyway. + */ +static inline void __ide_insw(unsigned long port, void *addr, + unsigned int count) +{ + __ide_flush_prologue(); + insw(port, addr, count); + __ide_flush_dcache_range((unsigned long)addr, count * 2); + __ide_flush_epilogue(); +} + +static inline void __ide_insl(unsigned long port, void *addr, unsigned int count) +{ + __ide_flush_prologue(); + insl(port, addr, count); + __ide_flush_dcache_range((unsigned long)addr, count * 4); + __ide_flush_epilogue(); +} + +static inline void __ide_outsw(unsigned long port, const void *addr, + unsigned long count) +{ + __ide_flush_prologue(); + outsw(port, addr, count); + __ide_flush_dcache_range((unsigned long)addr, count * 2); + __ide_flush_epilogue(); +} + +static inline void __ide_outsl(unsigned long port, const void *addr, + unsigned long count) +{ + __ide_flush_prologue(); + outsl(port, addr, count); + __ide_flush_dcache_range((unsigned long)addr, count * 4); + __ide_flush_epilogue(); +} + +static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count) +{ + __ide_flush_prologue(); + readsw(port, addr, count); + __ide_flush_dcache_range((unsigned long)addr, count * 2); + __ide_flush_epilogue(); +} + +static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count) +{ + __ide_flush_prologue(); + readsl(port, addr, count); + __ide_flush_dcache_range((unsigned long)addr, count * 4); + __ide_flush_epilogue(); +} + +static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) +{ + __ide_flush_prologue(); + writesw(port, addr, count); + __ide_flush_dcache_range((unsigned long)addr, count * 2); + __ide_flush_epilogue(); +} + +static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) +{ + __ide_flush_prologue(); + writesl(port, addr, count); + __ide_flush_dcache_range((unsigned long)addr, count * 4); + __ide_flush_epilogue(); +} + +/* ide_insw calls insw, not __ide_insw. Why? 
*/ +#undef insw +#undef insl +#undef outsw +#undef outsl +#define insw(port, addr, count) __ide_insw(port, addr, count) +#define insl(port, addr, count) __ide_insl(port, addr, count) +#define outsw(port, addr, count) __ide_outsw(port, addr, count) +#define outsl(port, addr, count) __ide_outsl(port, addr, count) + +#endif /* __KERNEL__ */ + +#endif /* __ASM_MACH_GENERIC_IDE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/ioremap.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/ioremap.h new file mode 100644 index 000000000..b379938d4 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/ioremap.h @@ -0,0 +1,34 @@ +/* + * include/asm-mips/mach-generic/ioremap.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef __ASM_MACH_GENERIC_IOREMAP_H +#define __ASM_MACH_GENERIC_IOREMAP_H + +#include <linux/types.h> + +/* + * Allow physical addresses to be fixed up to help peripherals located + * outside the low 32-bit range -- generic pass-through version. + */ +static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) +{ + return phys_addr; +} + +static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, + unsigned long flags) +{ + return NULL; +} + +static inline int plat_iounmap(const volatile void __iomem *addr) +{ + return 0; +} + +#endif /* __ASM_MACH_GENERIC_IOREMAP_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/irq.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/irq.h new file mode 100644 index 000000000..173245260 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/irq.h @@ -0,0 +1,45 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 by Ralf Baechle + */ +#ifndef __ASM_MACH_GENERIC_IRQ_H +#define __ASM_MACH_GENERIC_IRQ_H + +#ifndef NR_IRQS +#define NR_IRQS 48 +#endif + +#ifdef CONFIG_I8259 +#ifndef I8259A_IRQ_BASE +#define I8259A_IRQ_BASE 0 +#endif +#endif + +#ifdef CONFIG_IRQ_CPU + +#ifndef MIPS_CPU_IRQ_BASE +#ifdef CONFIG_I8259 +#define MIPS_CPU_IRQ_BASE 16 +#else +#define MIPS_CPU_IRQ_BASE 0 +#endif /* CONFIG_I8259 */ +#endif + +#ifdef CONFIG_IRQ_CPU_RM7K +#ifndef RM7K_CPU_IRQ_BASE +#define RM7K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+8) +#endif +#endif + +#ifdef CONFIG_IRQ_CPU_RM9K +#ifndef RM9K_CPU_IRQ_BASE +#define RM9K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+12) +#endif +#endif + +#endif /* CONFIG_IRQ_CPU */ + +#endif /* __ASM_MACH_GENERIC_IRQ_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/kernel-entry-init.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/kernel-entry-init.h new file mode 100644 index 000000000..7e66505fa --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/kernel-entry-init.h @@ -0,0 +1,25 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2005 Embedded Alley Solutions, Inc + * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef __ASM_MACH_GENERIC_KERNEL_ENTRY_H +#define __ASM_MACH_GENERIC_KERNEL_ENTRY_H + +/* Intentionally empty macro, used in head.S. Override in + * arch/mips/mach-xxx/kernel-entry-init.h when necessary. + */ +.macro kernel_entry_setup +.endm + +/* + * Do SMP slave processor setup necessary before we can savely execute C code. + */ + .macro smp_slave_setup + .endm + + +#endif /* __ASM_MACH_GENERIC_KERNEL_ENTRY_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/kmalloc.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/kmalloc.h new file mode 100644 index 000000000..b8e6deba3 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/kmalloc.h @@ -0,0 +1,13 @@ +#ifndef __ASM_MACH_GENERIC_KMALLOC_H +#define __ASM_MACH_GENERIC_KMALLOC_H + + +#ifndef CONFIG_DMA_COHERENT +/* + * Total overkill for most systems but need as a safe default. + * Set this one if any device in the system might do non-coherent DMA. + */ +#define ARCH_KMALLOC_MINALIGN 128 +#endif + +#endif /* __ASM_MACH_GENERIC_KMALLOC_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/mangle-port.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/mangle-port.h new file mode 100644 index 000000000..f49dc9902 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/mangle-port.h @@ -0,0 +1,52 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003, 2004 Ralf Baechle + */ +#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H +#define __ASM_MACH_GENERIC_MANGLE_PORT_H + +#define __swizzle_addr_b(port) (port) +#define __swizzle_addr_w(port) (port) +#define __swizzle_addr_l(port) (port) +#define __swizzle_addr_q(port) (port) + +/* + * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware; + * less sane hardware forces software to fiddle with this... + * + * Regardless, if the host bus endianness mismatches that of PCI/ISA, then + * you can't have the numerical value of data and byte addresses within + * multibyte quantities both preserved at the same time. Hence two + * variations of functions: non-prefixed ones that preserve the value + * and prefixed ones that preserve byte addresses. The latters are + * typically used for moving raw data between a peripheral and memory (cf. + * string I/O functions), hence the "__mem_" prefix. 
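A sketch of the value-preserving versus byte-preserving distinction drawn above, using the accessors built in asm/io.h earlier in this patch; the register offsets and the ready bit are invented for illustration. In the CONFIG_SWAP_IO_SPACE case defined just below, readl() runs its result through ioswabl() (a le32_to_cpu()), while the string form readsl() uses the pass-through __mem_ variant.

    #include <linux/io.h>

    static void example_drain_fifo(void __iomem *regs, u32 *buf, unsigned int words)
    {
            /* value-preserving: the numeric value matches the device's
             * register layout no matter how the byte lanes are wired */
            u32 status = readl(regs + 0x04);        /* assumed status register */

            if (status & 0x1)                       /* assumed "data ready" bit */
                    /* byte-preserving: payload bytes land in memory in bus
                     * order, which is what packet buffers and FIFOs want */
                    readsl(regs + 0x10, buf, words);    /* assumed FIFO window */
    }
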
+ */ +#if defined(CONFIG_SWAP_IO_SPACE) + +# define ioswabb(a, x) (x) +# define __mem_ioswabb(a, x) (x) +# define ioswabw(a, x) le16_to_cpu(x) +# define __mem_ioswabw(a, x) (x) +# define ioswabl(a, x) le32_to_cpu(x) +# define __mem_ioswabl(a, x) (x) +# define ioswabq(a, x) le64_to_cpu(x) +# define __mem_ioswabq(a, x) (x) + +#else + +# define ioswabb(a, x) (x) +# define __mem_ioswabb(a, x) (x) +# define ioswabw(a, x) (x) +# define __mem_ioswabw(a, x) cpu_to_le16(x) +# define ioswabl(a, x) (x) +# define __mem_ioswabl(a, x) cpu_to_le32(x) +# define ioswabq(a, x) (x) +# define __mem_ioswabq(a, x) cpu_to_le32(x) + +#endif + +#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/mc146818rtc.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/mc146818rtc.h new file mode 100644 index 000000000..0b9a942f0 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/mc146818rtc.h @@ -0,0 +1,36 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998, 2001, 03 by Ralf Baechle + * + * RTC routines for PC style attached Dallas chip. + */ +#ifndef __ASM_MACH_GENERIC_MC146818RTC_H +#define __ASM_MACH_GENERIC_MC146818RTC_H + +#include <asm/io.h> + +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_IRQ 8 + +static inline unsigned char CMOS_READ(unsigned long addr) +{ + outb_p(addr, RTC_PORT(0)); + return inb_p(RTC_PORT(1)); +} + +static inline void CMOS_WRITE(unsigned char data, unsigned long addr) +{ + outb_p(addr, RTC_PORT(0)); + outb_p(data, RTC_PORT(1)); +} + +#define RTC_ALWAYS_BCD 1 + +#ifndef mc146818_decode_year +#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900) +#endif + +#endif /* __ASM_MACH_GENERIC_MC146818RTC_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/spaces.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/spaces.h new file mode 100644 index 000000000..c9fa4b149 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/spaces.h @@ -0,0 +1,85 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_MACH_GENERIC_SPACES_H +#define _ASM_MACH_GENERIC_SPACES_H + +#include <linux/const.h> + +/* + * This gives the physical RAM offset. + */ +#ifndef PHYS_OFFSET +#define PHYS_OFFSET _AC(0, UL) +#endif + +#ifdef CONFIG_32BIT + +#define CAC_BASE _AC(0x80000000, UL) +#define IO_BASE _AC(0xa0000000, UL) +#define UNCAC_BASE _AC(0xa0000000, UL) + +#ifndef MAP_BASE +#define MAP_BASE _AC(0xc0000000, UL) +#endif + +/* + * Memory above this physical address will be considered highmem. 
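A worked note on how these constants combine with the io.h helpers earlier in this patch, assuming the 32-bit defaults shown here (PHYS_OFFSET = 0, CAC_BASE = 0x80000000): PAGE_OFFSET, defined at the bottom of this file as CAC_BASE + PHYS_OFFSET, works out to 0x80000000, which is exactly the value virt_to_phys() subtracts and phys_to_virt() adds. The address below is an arbitrary example.

    #include <asm/io.h>

    /* illustration only, using the default 32-bit values above */
    static void example_addr_math(void)
    {
            void *virt = (void *)0x80401000UL;          /* some KSEG0 (cached) kernel address */
            unsigned long phys = virt_to_phys(virt);    /* 0x80401000 - 0x80000000 + 0 = 0x00401000 */
            void *back = phys_to_virt(phys);            /* and back to 0x80401000 */

            (void)back;
    }
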
+ */ +#ifndef HIGHMEM_START +#define HIGHMEM_START _AC(0x20000000, UL) +#endif + +#endif /* CONFIG_32BIT */ + +#ifdef CONFIG_64BIT + +#ifndef CAC_BASE +#ifdef CONFIG_DMA_NONCOHERENT +#define CAC_BASE _AC(0x9800000000000000, UL) +#else +#define CAC_BASE _AC(0xa800000000000000, UL) +#endif +#endif + +#ifndef IO_BASE +#define IO_BASE _AC(0x9000000000000000, UL) +#endif + +#ifndef UNCAC_BASE +#define UNCAC_BASE _AC(0x9000000000000000, UL) +#endif + +#ifndef MAP_BASE +#define MAP_BASE _AC(0xc000000000000000, UL) +#endif + +/* + * Memory above this physical address will be considered highmem. + * Fixme: 59 bits is a fictive number and makes assumptions about processors + * in the distant future. Nobody will care for a few years :-) + */ +#ifndef HIGHMEM_START +#define HIGHMEM_START (_AC(1, UL) << _AC(59, UL)) +#endif + +#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK)) +#define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK)) +#define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK)) + +#endif /* CONFIG_64BIT */ + +/* + * This handles the memory map. + */ +#ifndef PAGE_OFFSET +#define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET) +#endif + +#endif /* __ASM_MACH_GENERIC_SPACES_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/topology.h b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/topology.h new file mode 100644 index 000000000..5428f333a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mach-generic/topology.h @@ -0,0 +1 @@ +#include <asm-generic/topology.h> diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mipsprom.h b/target/linux/realtek/files/arch/rlx/include/asm/mipsprom.h new file mode 100644 index 000000000..146d41b67 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mipsprom.h @@ -0,0 +1,76 @@ +#ifndef __ASM_MIPS_PROM_H +#define __ASM_MIPS_PROM_H + +#define PROM_RESET 0 +#define PROM_EXEC 1 +#define PROM_RESTART 2 +#define PROM_REINIT 3 +#define PROM_REBOOT 4 +#define PROM_AUTOBOOT 5 +#define PROM_OPEN 6 +#define PROM_READ 7 +#define PROM_WRITE 8 +#define PROM_IOCTL 9 +#define PROM_CLOSE 10 +#define PROM_GETCHAR 11 +#define PROM_PUTCHAR 12 +#define PROM_SHOWCHAR 13 /* XXX */ +#define PROM_GETS 14 /* XXX */ +#define PROM_PUTS 15 /* XXX */ +#define PROM_PRINTF 16 /* XXX */ + +/* What are these for? */ +#define PROM_INITPROTO 17 /* XXX */ +#define PROM_PROTOENABLE 18 /* XXX */ +#define PROM_PROTODISABLE 19 /* XXX */ +#define PROM_GETPKT 20 /* XXX */ +#define PROM_PUTPKT 21 /* XXX */ + +/* More PROM shit. Probably has to do with VME RMW cycles??? 
*/ +#define PROM_ORW_RMW 22 /* XXX */ +#define PROM_ORH_RMW 23 /* XXX */ +#define PROM_ORB_RMW 24 /* XXX */ +#define PROM_ANDW_RMW 25 /* XXX */ +#define PROM_ANDH_RMW 26 /* XXX */ +#define PROM_ANDB_RMW 27 /* XXX */ + +/* Cache handling stuff */ +#define PROM_FLUSHCACHE 28 /* XXX */ +#define PROM_CLEARCACHE 29 /* XXX */ + +/* Libc alike stuff */ +#define PROM_SETJMP 30 /* XXX */ +#define PROM_LONGJMP 31 /* XXX */ +#define PROM_BEVUTLB 32 /* XXX */ +#define PROM_GETENV 33 /* XXX */ +#define PROM_SETENV 34 /* XXX */ +#define PROM_ATOB 35 /* XXX */ +#define PROM_STRCMP 36 /* XXX */ +#define PROM_STRLEN 37 /* XXX */ +#define PROM_STRCPY 38 /* XXX */ +#define PROM_STRCAT 39 /* XXX */ + +/* Misc stuff */ +#define PROM_PARSER 40 /* XXX */ +#define PROM_RANGE 41 /* XXX */ +#define PROM_ARGVIZE 42 /* XXX */ +#define PROM_HELP 43 /* XXX */ + +/* Entry points for some PROM commands */ +#define PROM_DUMPCMD 44 /* XXX */ +#define PROM_SETENVCMD 45 /* XXX */ +#define PROM_UNSETENVCMD 46 /* XXX */ +#define PROM_PRINTENVCMD 47 /* XXX */ +#define PROM_BEVEXCEPT 48 /* XXX */ +#define PROM_ENABLECMD 49 /* XXX */ +#define PROM_DISABLECMD 50 /* XXX */ + +#define PROM_CLEARNOFAULT 51 /* XXX */ +#define PROM_NOTIMPLEMENT 52 /* XXX */ + +#define PROM_NV_GET 53 /* XXX */ +#define PROM_NV_SET 54 /* XXX */ + +extern char *prom_getenv(char *); + +#endif /* __ASM_MIPS_PROM_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mman.h b/target/linux/realtek/files/arch/rlx/include/asm/mman.h new file mode 100644 index 000000000..e4d6f1fb1 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mman.h @@ -0,0 +1,77 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1999, 2002 by Ralf Baechle + */ +#ifndef _ASM_MMAN_H +#define _ASM_MMAN_H + +/* + * Protections are chosen from these bits, OR'd together. The + * implementation does not necessarily support PROT_EXEC or PROT_WRITE + * without PROT_READ. The only guarantees are that no writing will be + * allowed without PROT_WRITE and no access will be allowed for PROT_NONE. 
+ */ +#define PROT_NONE 0x00 /* page can not be accessed */ +#define PROT_READ 0x01 /* page can be read */ +#define PROT_WRITE 0x02 /* page can be written */ +#define PROT_EXEC 0x04 /* page can be executed */ +/* 0x08 reserved for PROT_EXEC_NOFLUSH */ +#define PROT_SEM 0x10 /* page may be used for atomic ops */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +/* + * Flags for mmap + */ +#define MAP_SHARED 0x001 /* Share changes */ +#define MAP_PRIVATE 0x002 /* Changes are private */ +#define MAP_TYPE 0x00f /* Mask for type of mapping */ +#define MAP_FIXED 0x010 /* Interpret addr exactly */ + +/* not used by linux, but here to make sure we don't clash with ABI defines */ +#define MAP_RENAME 0x020 /* Assign page to file */ +#define MAP_AUTOGROW 0x040 /* File may grow by writing */ +#define MAP_LOCAL 0x080 /* Copy on fork/sproc */ +#define MAP_AUTORSRV 0x100 /* Logical swap reserved on demand */ + +/* These are linux-specific */ +#define MAP_NORESERVE 0x0400 /* don't check for reservations */ +#define MAP_ANONYMOUS 0x0800 /* don't use a file */ +#define MAP_GROWSDOWN 0x1000 /* stack-like segment */ +#define MAP_DENYWRITE 0x2000 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x4000 /* mark it as an executable */ +#define MAP_LOCKED 0x8000 /* pages are locked */ +#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x20000 /* do not block on IO */ + +/* + * Flags for msync + */ +#define MS_ASYNC 0x0001 /* sync memory asynchronously */ +#define MS_INVALIDATE 0x0002 /* invalidate mappings & caches */ +#define MS_SYNC 0x0004 /* synchronous memory sync */ + +/* + * Flags for mlockall + */ +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ + +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_DONTNEED 4 /* don't need these pages */ + +/* common parameters: try to keep these consistent across architectures */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ + +/* compatibility flags */ +#define MAP_FILE 0 + +#endif /* _ASM_MMAN_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mmu.h b/target/linux/realtek/files/arch/rlx/include/asm/mmu.h new file mode 100644 index 000000000..4063edd79 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mmu.h @@ -0,0 +1,6 @@ +#ifndef __ASM_MMU_H +#define __ASM_MMU_H + +typedef unsigned long mm_context_t[NR_CPUS]; + +#endif /* __ASM_MMU_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mmu_context.h b/target/linux/realtek/files/arch/rlx/include/asm/mmu_context.h new file mode 100644 index 000000000..c57345efb --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mmu_context.h @@ -0,0 +1,166 @@ +/* + * Switch a MMU context. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. 
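Since the PROT_* and MAP_* values in asm/mman.h above are the user-visible MIPS ABI, a user-space sketch is the natural illustration; the mapping length and the later mprotect() step are arbitrary choices.

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1 << 16;

            /* MAP_PRIVATE | MAP_ANONYMOUS is 0x802 with the values above */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            ((char *)p)[0] = 42;            /* touch the first page */
            mprotect(p, len, PROT_READ);    /* then drop write permission */
            munmap(p, len);
            return 0;
    }
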
+ */ +#ifndef _ASM_MMU_CONTEXT_H +#define _ASM_MMU_CONTEXT_H + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/slab.h> +#include <asm/cacheflush.h> +#include <asm/hazards.h> +#include <asm/tlbflush.h> +#include <asm-generic/mm_hooks.h> + +/* + * For the fast tlb miss handlers, we keep a per cpu array of pointers + * to the current pgd for each processor. Also, the proc. id is stuffed + * into the context register. + */ +extern unsigned long pgd_current[]; + +#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ + pgd_current[smp_processor_id()] = (unsigned long)(pgd) + +#define TLBMISS_HANDLER_SETUP() \ + write_c0_context((unsigned long) smp_processor_id() << 25); \ + back_to_back_c0_hazard(); \ + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) + +#define ASID_INC 0x40 +#define ASID_MASK 0xfc0 + +#define cpu_context(cpu, mm) ((mm)->context[cpu]) +#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) +#define asid_cache(cpu) (cpu_data[cpu].asid_cache) + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. + */ +#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) +#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) + +/* Normal, classic MIPS get_new_mmu_context */ +static inline void +get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) +{ + unsigned long asid = asid_cache(cpu); + + if (! ((asid += ASID_INC) & ASID_MASK) ) { + local_flush_tlb_all(); /* start new asid cycle */ + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION; + } + cpu_context(cpu, mm) = asid_cache(cpu) = asid; +} + +/* + * Initialize the context related info for a new mm_struct + * instance. + */ +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + int i; + + for_each_online_cpu(i) + cpu_context(i, mm) = 0; + + return 0; +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned int cpu = smp_processor_id(); + unsigned long flags; + + local_irq_save(flags); + + /* Check if our ASID is of an older version and thus invalid */ + if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) + get_new_mmu_context(next, cpu); + + write_c0_entryhi(cpu_context(cpu, next)); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + + /* + * Mark current->active_mm as not "active" anymore. + * We don't want to mislead possible IPI tlb flush routines. + */ + cpu_clear(cpu, prev->cpu_vm_mask); + cpu_set(cpu, next->cpu_vm_mask); + + local_irq_restore(flags); +} + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. + */ +static inline void destroy_context(struct mm_struct *mm) +{ +} + +#define deactivate_mm(tsk, mm) do { } while (0) + +/* + * After we have set current->mm to a new value, this activates + * the context for the new mm so we see the new mappings. + */ +static inline void +activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + unsigned long flags; + unsigned int cpu = smp_processor_id(); + + local_irq_save(flags); + + /* Unconditionally get a new ASID. 
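 *
 * As a worked illustration (editor's note, not in the original file):
 * with ASID_INC 0x40 and ASID_MASK 0xfc0 the hardware ASID lives in
 * bits 6..11, giving 63 usable values per cycle (0x40, 0x80, ...,
 * 0xfc0).  The next increment carries into bit 12, the masked result
 * is 0, so get_new_mmu_context() flushes the TLB and the bits above
 * ASID_MASK act as a software "version"; switch_mm() compares that
 * version through ASID_VERSION_MASK to decide when an mm needs a
 * fresh ASID.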
*/ + get_new_mmu_context(next, cpu); + + write_c0_entryhi(cpu_context(cpu, next)); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + + /* mark mmu ownership change */ + cpu_clear(cpu, prev->cpu_vm_mask); + cpu_set(cpu, next->cpu_vm_mask); + + local_irq_restore(flags); +} + +/* + * If mm is currently active_mm, we can't really drop it. Instead, + * we will get a new one for it. + */ +static inline void +drop_mmu_context(struct mm_struct *mm, unsigned cpu) +{ + unsigned long flags; + + local_irq_save(flags); + + if (cpu_isset(cpu, mm->cpu_vm_mask)) { + get_new_mmu_context(mm, cpu); + write_c0_entryhi(cpu_asid(cpu, mm)); + } else { + /* will get a new context next time */ + cpu_context(cpu, mm) = 0; + } + local_irq_restore(flags); +} + +#endif /* _ASM_MMU_CONTEXT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mmzone.h b/target/linux/realtek/files/arch/rlx/include/asm/mmzone.h new file mode 100644 index 000000000..f53ec54c9 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mmzone.h @@ -0,0 +1,17 @@ +/* + * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99 + * Rewritten for Linux 2.6 by Christoph Hellwig (hch@lst.de) Jan 2004 + */ +#ifndef _ASM_MMZONE_H_ +#define _ASM_MMZONE_H_ + +#include <asm/page.h> +#include <mmzone.h> + +#ifdef CONFIG_DISCONTIGMEM + +#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT) + +#endif /* CONFIG_DISCONTIGMEM */ + +#endif /* _ASM_MMZONE_H_ */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/module.h b/target/linux/realtek/files/arch/rlx/include/asm/module.h new file mode 100644 index 000000000..4095f9c69 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/module.h @@ -0,0 +1,65 @@ +#ifndef _ASM_MODULE_H +#define _ASM_MODULE_H + +#include <linux/list.h> +#include <asm/uaccess.h> + +struct mod_arch_specific { + /* Data Bus Error exception tables */ + struct list_head dbe_list; + const struct exception_table_entry *dbe_start; + const struct exception_table_entry *dbe_end; +}; + +typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ + +typedef struct { + Elf64_Addr r_offset; /* Address of relocation. */ + Elf64_Word r_sym; /* Symbol index. */ + Elf64_Byte r_ssym; /* Special symbol. */ + Elf64_Byte r_type3; /* Third relocation. */ + Elf64_Byte r_type2; /* Second relocation. */ + Elf64_Byte r_type; /* First relocation. */ +} Elf64_Mips_Rel; + +typedef struct { + Elf64_Addr r_offset; /* Address of relocation. */ + Elf64_Word r_sym; /* Symbol index. */ + Elf64_Byte r_ssym; /* Special symbol. */ + Elf64_Byte r_type3; /* Third relocation. */ + Elf64_Byte r_type2; /* Second relocation. */ + Elf64_Byte r_type; /* First relocation. */ + Elf64_Sxword r_addend; /* Addend. */ +} Elf64_Mips_Rela; + +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr +#define Elf_Addr Elf32_Addr + +#define Elf_Mips_Rel Elf32_Rel +#define Elf_Mips_Rela Elf32_Rela + +#define ELF_MIPS_R_SYM(rel) ELF32_R_SYM(rel.r_info) +#define ELF_MIPS_R_TYPE(rel) ELF32_R_TYPE(rel.r_info) + + +#ifdef CONFIG_MODULES +/* Given an address, look for it in the exception tables. */ +const struct exception_table_entry*search_module_dbetables(unsigned long addr); +#else +/* Given an address, look for it in the exception tables. 
*/ +static inline const struct exception_table_entry * +search_module_dbetables(unsigned long addr) +{ + return NULL; +} +#endif + +#define MODULE_PROC_FAMILY "RLX " +#define MODULE_KERNEL_TYPE "32BIT " + +#define MODULE_ARCH_VERMAGIC \ + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE + +#endif /* _ASM_MODULE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/msgbuf.h b/target/linux/realtek/files/arch/rlx/include/asm/msgbuf.h new file mode 100644 index 000000000..09115e649 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/msgbuf.h @@ -0,0 +1,47 @@ +#ifndef _ASM_MSGBUF_H +#define _ASM_MSGBUF_H + + +/* + * The msqid64_ds structure for the MIPS architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - extension of time_t to 64-bit on 32-bitsystem to solve the y2038 problem + * - 2 miscellaneous unsigned long values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; +#if !defined(CONFIG_CPU_LITTLE_ENDIAN) + unsigned long __unused1; +#endif + __kernel_time_t msg_stime; /* last msgsnd time */ +#if defined(CONFIG_CPU_LITTLE_ENDIAN) + unsigned long __unused1; +#endif +#if !defined(CONFIG_CPU_LITTLE_ENDIAN) + unsigned long __unused2; +#endif + __kernel_time_t msg_rtime; /* last msgrcv time */ +#if defined(CONFIG_CPU_LITTLE_ENDIAN) + unsigned long __unused2; +#endif +#if !defined(CONFIG_CPU_LITTLE_ENDIAN) + unsigned long __unused3; +#endif + __kernel_time_t msg_ctime; /* last change time */ +#if defined(CONFIG_CPU_LITTLE_ENDIAN) + unsigned long __unused3; +#endif + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* _ASM_MSGBUF_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/mutex.h b/target/linux/realtek/files/arch/rlx/include/asm/mutex.h new file mode 100644 index 000000000..458c1f7fb --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/mutex.h @@ -0,0 +1,9 @@ +/* + * Pull in the generic implementation for the mutex fastpath. + * + * TODO: implement optimized primitives instead, or leave the generic + * implementation in place, or pick the atomic_xchg() based generic + * implementation. (see asm-generic/mutex-xchg.h for details) + */ + +#include <asm-generic/mutex-dec.h> diff --git a/target/linux/realtek/files/arch/rlx/include/asm/paccess.h b/target/linux/realtek/files/arch/rlx/include/asm/paccess.h new file mode 100644 index 000000000..ab5b0f5d9 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/paccess.h @@ -0,0 +1,107 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * + * Protected memory access. Used for everything that might take revenge + * by sending a DBE error like accessing possibly non-existant memory or + * devices. 
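 *
 * Usage sketch (editor's illustration, not part of the original
 * header): a probe reads through get_dbe() and treats a caught bus
 * error as "nothing there" instead of taking a fatal exception:
 *
 *	u32 id;
 *	if (get_dbe(id, (u32 *)probe_addr))
 *		return -ENODEV;
 *
 * get_dbe()/put_dbe() evaluate to 0 on success and to -EFAULT when
 * the access faulted and was fixed up via the __dbe_table entries
 * declared below; probe_addr is only a placeholder for whatever
 * register is being probed.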
+ */ +#ifndef _ASM_PACCESS_H +#define _ASM_PACCESS_H + +#include <linux/errno.h> + +#define __PA_ADDR ".word" + +extern asmlinkage void handle_ibe(void); +extern asmlinkage void handle_dbe(void); + +#define put_dbe(x, ptr) __put_dbe((x), (ptr), sizeof(*(ptr))) +#define get_dbe(x, ptr) __get_dbe((x), (ptr), sizeof(*(ptr))) + +struct __large_pstruct { unsigned long buf[100]; }; +#define __mp(x) (*(struct __large_pstruct *)(x)) + +#define __get_dbe(x, ptr, size) \ +({ \ + long __gu_err; \ + __typeof__(*(ptr)) __gu_val; \ + unsigned long __gu_addr; \ + __asm__("":"=r" (__gu_val)); \ + __gu_addr = (unsigned long) (ptr); \ + __asm__("":"=r" (__gu_err)); \ + switch (size) { \ + case 1: __get_dbe_asm("lb"); break; \ + case 2: __get_dbe_asm("lh"); break; \ + case 4: __get_dbe_asm("lw"); break; \ + case 8: __get_dbe_asm("ld"); break; \ + default: __get_dbe_unknown(); break; \ + } \ + x = (__typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_dbe_asm(insn) \ +{ \ + __asm__ __volatile__( \ + "1:\t" insn "\t%1,%2\n\t" \ + "move\t%0,$0\n" \ + "2:\n\t" \ + ".section\t.fixup,\"ax\"\n" \ + "3:\tli\t%0,%3\n\t" \ + "move\t%1,$0\n\t" \ + "j\t2b\n\t" \ + ".previous\n\t" \ + ".section\t__dbe_table,\"a\"\n\t" \ + __PA_ADDR "\t1b, 3b\n\t" \ + ".previous" \ + :"=r" (__gu_err), "=r" (__gu_val) \ + :"o" (__mp(__gu_addr)), "i" (-EFAULT)); \ +} + +extern void __get_dbe_unknown(void); + +#define __put_dbe(x, ptr, size) \ +({ \ + long __pu_err; \ + __typeof__(*(ptr)) __pu_val; \ + long __pu_addr; \ + __pu_val = (x); \ + __pu_addr = (long) (ptr); \ + __asm__("":"=r" (__pu_err)); \ + switch (size) { \ + case 1: __put_dbe_asm("sb"); break; \ + case 2: __put_dbe_asm("sh"); break; \ + case 4: __put_dbe_asm("sw"); break; \ + case 8: __put_dbe_asm("sd"); break; \ + default: __put_dbe_unknown(); break; \ + } \ + __pu_err; \ +}) + +#define __put_dbe_asm(insn) \ +{ \ + __asm__ __volatile__( \ + "1:\t" insn "\t%1,%2\n\t" \ + "move\t%0,$0\n" \ + "2:\n\t" \ + ".section\t.fixup,\"ax\"\n" \ + "3:\tli\t%0,%3\n\t" \ + "j\t2b\n\t" \ + ".previous\n\t" \ + ".section\t__dbe_table,\"a\"\n\t" \ + __PA_ADDR "\t1b, 3b\n\t" \ + ".previous" \ + : "=r" (__pu_err) \ + : "r" (__pu_val), "o" (__mp(__pu_addr)), "i" (-EFAULT)); \ +} + +extern void __put_dbe_unknown(void); + +extern unsigned long search_dbe_table(unsigned long addr); + +#endif /* _ASM_PACCESS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/page.h b/target/linux/realtek/files/arch/rlx/include/asm/page.h new file mode 100644 index 000000000..8d08065af --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/page.h @@ -0,0 +1,136 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_PAGE_H +#define _ASM_PAGE_H + +#include <spaces.h> + +/* + * PAGE_SHIFT determines the page size + */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) + +#ifndef __ASSEMBLY__ + +#include <linux/pfn.h> +#include <asm/io.h> + +extern void build_clear_page(void); +extern void build_copy_page(void); + +/* + * It's normally defined only for FLATMEM config but it's + * used in our early mem init code for all memory models. + * So always define it. 
+ */ +#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET) + +extern void clear_page(void * page); +extern void copy_page(void * to, void * from); + +extern unsigned long shm_align_mask; + +static inline unsigned long pages_do_alias(unsigned long addr1, + unsigned long addr2) +{ + return (addr1 ^ addr2) & shm_align_mask; +} + +struct page; + +static inline void clear_user_page(void *addr, unsigned long vaddr, + struct page *page) +{ + extern void (*flush_data_cache_page)(unsigned long addr); + + clear_page(addr); + if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)) + flush_data_cache_page((unsigned long)addr); +} + +static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, + struct page *to) +{ + extern void (*flush_data_cache_page)(unsigned long addr); + copy_page(vto, vfrom); + if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) + flush_data_cache_page((unsigned long)vto); +} + +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; +#define pte_val(x) ((x).pte) +#define __pte(x) ((pte_t) { (x) } ) +typedef struct page *pgtable_t; + +/* + * Right now we don't support 4-level pagetables, so all pud-related + * definitions come from <asm-generic/pgtable-nopud.h>. + */ + +/* + * Finall the top of the hierarchy, the pgd + */ +typedef struct { unsigned long pgd; } pgd_t; +#define pgd_val(x) ((x).pgd) +#define __pgd(x) ((pgd_t) { (x) } ) + +/* + * Manipulate page protection bits + */ +typedef struct { unsigned long pgprot; } pgprot_t; +#define pgprot_val(x) ((x).pgprot) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +/* + * On R4000-style MMUs where a TLB entry is mapping a adjacent even / odd + * pair of pages we only have a single global bit per pair of pages. When + * writing to the TLB make sure we always have the bit set for both pages + * or none. This macro is used to access the `buddy' of the pte we're just + * working on. + */ +#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) + +#endif /* !__ASSEMBLY__ */ + +/* + * __pa()/__va() should be used only during mem init. + */ +#define __pa(x) \ + ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) +#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) + +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define pfn_valid(pfn) \ +({ \ + unsigned long __pfn = (pfn); \ + /* avoid <linux/bootmem.h> include hell */ \ + extern unsigned long min_low_pfn; \ + \ + __pfn >= min_low_pfn && __pfn < max_mapnr; \ +}) + +#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr))) +#define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr))) + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) +#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* _ASM_PAGE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/param.h b/target/linux/realtek/files/arch/rlx/include/asm/param.h new file mode 100644 index 000000000..1d9bb8c5a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/param.h @@ -0,0 +1,31 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright 1994 - 2000, 2002 Ralf Baechle (ralf@gnu.org) + * Copyright 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_PARAM_H +#define _ASM_PARAM_H + +#ifdef __KERNEL__ + +# define HZ CONFIG_HZ /* Internal kernel timer frequency */ +# define USER_HZ 100 /* .. some user interfaces are in "ticks" */ +# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ +#endif + +#ifndef HZ +#define HZ 100 +#endif + +#define EXEC_PAGESIZE 65536 + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#endif /* _ASM_PARAM_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/parport.h b/target/linux/realtek/files/arch/rlx/include/asm/parport.h new file mode 100644 index 000000000..f52656826 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/parport.h @@ -0,0 +1,15 @@ +/* + * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk> + * + * This file should only be included by drivers/parport/parport_pc.c. + */ +#ifndef _ASM_PARPORT_H +#define _ASM_PARPORT_H + +static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); +static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) +{ + return parport_pc_find_isa_ports(autoirq, autodma); +} + +#endif /* _ASM_PARPORT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/pci.h b/target/linux/realtek/files/arch/rlx/include/asm/pci.h new file mode 100644 index 000000000..42b895b6f --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/pci.h @@ -0,0 +1,184 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_PCI_H +#define _ASM_PCI_H + +#include <linux/mm.h> + +#ifdef __KERNEL__ + +/* + * This file essentially defines the interface between board + * specific PCI code and MIPS common PCI code. Should potentially put + * into include/asm/pci.h file. + */ + +#include <linux/ioport.h> + +/* + * Each pci channel is a top-level PCI bus seem by CPU. A machine with + * multiple PCI channels may have multiple PCI host controllers or a + * single controller supporting multiple channels. + */ +struct pci_controller { + struct pci_controller *next; + struct pci_bus *bus; + + struct pci_ops *pci_ops; + struct resource *mem_resource; + unsigned long mem_offset; + struct resource *io_resource; + unsigned long io_offset; + unsigned long io_map_base; + + unsigned int index; + /* For compatibility with current (as of July 2003) pciutils + and XFree86. Eventually will be removed. */ + unsigned int need_domain_info; + + int iommu; + + /* Optional access methods for reading/writing the bus number + of the PCI controller */ + int (*get_busno)(void); + void (*set_busno)(int busno); +}; + +/* + * Used by boards to register their PCI busses before the actual scanning. 
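 *
 * A board port typically does something like this from an early
 * initcall (editor's sketch with made-up names and addresses, not
 * part of the original header):
 *
 *	static struct resource rtl_pci_mem = {
 *		.name  = "RTL PCI MEM",
 *		.start = 0x1c000000, .end = 0x1cffffff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *	static struct resource rtl_pci_io = {
 *		.name  = "RTL PCI I/O",
 *		.start = 0x1d000000, .end = 0x1d0fffff,
 *		.flags = IORESOURCE_IO,
 *	};
 *	static struct pci_controller rtl_pci_controller = {
 *		.pci_ops      = &rtl_pci_ops,
 *		.mem_resource = &rtl_pci_mem,
 *		.io_resource  = &rtl_pci_io,
 *	};
 *	register_pci_controller(&rtl_pci_controller);
 *
 * so the generic MIPS PCI code can later walk the registered hoses
 * and scan each bus through its pci_ops.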
+ */ +extern struct pci_controller * alloc_pci_controller(void); +extern void register_pci_controller(struct pci_controller *hose); + +/* + * board supplied pci irq fixup routine + */ +extern int pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin); + + +/* Can be used to override the logic in pci_scan_bus for skipping + already-configured bus numbers - to be used for buggy BIOSes + or architectures with incomplete PCI setup by the loader */ + +extern unsigned int pcibios_assign_all_busses(void); + +#define pcibios_scan_all_fns(a, b) 0 + +extern unsigned long PCIBIOS_MIN_IO; +extern unsigned long PCIBIOS_MIN_MEM; + +#define PCIBIOS_MIN_CARDBUS_IO 0x4000 + +extern void pcibios_set_master(struct pci_dev *dev); + +static inline void pcibios_penalize_isa_irq(int irq, int active) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +#define HAVE_PCI_MMAP + +extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); + +/* + * Dynamic DMA mapping stuff. + * MIPS has everything mapped statically. + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <asm/scatterlist.h> +#include <linux/string.h> +#include <asm/io.h> + +struct pci_dev; + +/* + * The PCI address space does equal the physical memory address space. The + * networking and block device layers use this boolean for bounce buffer + * decisions. This is set if any hose does not have an IOMMU. + */ +extern unsigned int PCI_DMA_BUS_IS_PHYS; + +#ifdef CONFIG_DMA_NEED_PCI_MAP_STATE + +/* pci_unmap_{single,page} is not a nop, thus... */ +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME; +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME; +#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) + +#else /* CONFIG_DMA_NEED_PCI_MAP_STATE */ + +/* pci_unmap_{page,single} is a nop so... 
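 * the bookkeeping below compiles away to nothing.  Either way a driver
 * keeps the same source (editor's illustration, not part of the
 * original header):
 *
 *	struct rx_slot {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *	};
 *
 *	pci_unmap_addr_set(slot, mapping, dma);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(slot, mapping),
 *			 len, PCI_DMA_FROMDEVICE);
 *
 * where slot is a struct rx_slot pointer; the stored dma address only
 * costs memory when the unmap path really needs it.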
*/ +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) +#define pci_unmap_addr(PTR, ADDR_NAME) (0) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) +#define pci_unmap_len(PTR, LEN_NAME) (0) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) + +#endif /* CONFIG_DMA_NEED_PCI_MAP_STATE */ + +#ifdef CONFIG_PCI +static inline void pci_dma_burst_advice(struct pci_dev *pdev, + enum pci_dma_burst_strategy *strat, + unsigned long *strategy_parameter) +{ + *strat = PCI_DMA_BURST_INFINITY; + *strategy_parameter = ~0UL; +} +#endif + +extern void pcibios_resource_to_bus(struct pci_dev *dev, + struct pci_bus_region *region, struct resource *res); + +extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, + struct pci_bus_region *region); + +static inline struct resource * +pcibios_select_root(struct pci_dev *pdev, struct resource *res) +{ + struct resource *root = NULL; + + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + if (res->flags & IORESOURCE_MEM) + root = &iomem_resource; + + return root; +} + +#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + return hose->need_domain_info; +} + +#endif /* __KERNEL__ */ + +/* implement the pci_ DMA API in terms of the generic device dma_ one */ +#include <asm-generic/pci-dma-compat.h> + +/* Do platform specific device initialization at pci_enable_device() time */ +extern int pcibios_plat_dev_init(struct pci_dev *dev); + +/* Chances are this interrupt is wired PC-style ... */ +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return channel ? 15 : 14; +} + +extern int pci_probe_only; + +extern char * (*pcibios_plat_setup)(char *str); + +#endif /* _ASM_PCI_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/pci/bridge.h b/target/linux/realtek/files/arch/rlx/include/asm/pci/bridge.h new file mode 100644 index 000000000..5f4b9d4e4 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/pci/bridge.h @@ -0,0 +1,854 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * bridge.h - bridge chip header file, derived from IRIX <sys/PCI/bridge.h>, + * revision 1.76. + * + * Copyright (C) 1996, 1999 Silcon Graphics, Inc. 
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) + */ +#ifndef _ASM_PCI_BRIDGE_H +#define _ASM_PCI_BRIDGE_H + +#include <linux/types.h> +#include <linux/pci.h> +#include <asm/xtalk/xwidget.h> /* generic widget header */ +#include <asm/sn/types.h> + +/* I/O page size */ + +#define IOPFNSHIFT 12 /* 4K per mapped page */ + +#define IOPGSIZE (1 << IOPFNSHIFT) +#define IOPG(x) ((x) >> IOPFNSHIFT) +#define IOPGOFF(x) ((x) & (IOPGSIZE-1)) + +/* Bridge RAM sizes */ + +#define BRIDGE_ATE_RAM_SIZE 0x00000400 /* 1kB ATE RAM */ + +#define BRIDGE_CONFIG_BASE 0x20000 +#define BRIDGE_CONFIG1_BASE 0x28000 +#define BRIDGE_CONFIG_END 0x30000 +#define BRIDGE_CONFIG_SLOT_SIZE 0x1000 + +#define BRIDGE_SSRAM_512K 0x00080000 /* 512kB */ +#define BRIDGE_SSRAM_128K 0x00020000 /* 128kB */ +#define BRIDGE_SSRAM_64K 0x00010000 /* 64kB */ +#define BRIDGE_SSRAM_0K 0x00000000 /* 0kB */ + +/* ======================================================================== + * Bridge address map + */ + +#ifndef __ASSEMBLY__ + +/* + * All accesses to bridge hardware registers must be done + * using 32-bit loads and stores. + */ +typedef u32 bridgereg_t; + +typedef u64 bridge_ate_t; + +/* pointers to bridge ATEs + * are always "pointer to volatile" + */ +typedef volatile bridge_ate_t *bridge_ate_p; + +/* + * It is generally preferred that hardware registers on the bridge + * are located from C code via this structure. + * + * Generated from Bridge spec dated 04oct95 + */ + +typedef volatile struct bridge_s { + /* Local Registers 0x000000-0x00FFFF */ + + /* standard widget configuration 0x000000-0x000057 */ + widget_cfg_t b_widget; /* 0x000000 */ + + /* helper fieldnames for accessing bridge widget */ + +#define b_wid_id b_widget.w_id +#define b_wid_stat b_widget.w_status +#define b_wid_err_upper b_widget.w_err_upper_addr +#define b_wid_err_lower b_widget.w_err_lower_addr +#define b_wid_control b_widget.w_control +#define b_wid_req_timeout b_widget.w_req_timeout +#define b_wid_int_upper b_widget.w_intdest_upper_addr +#define b_wid_int_lower b_widget.w_intdest_lower_addr +#define b_wid_err_cmdword b_widget.w_err_cmd_word +#define b_wid_llp b_widget.w_llp_cfg +#define b_wid_tflush b_widget.w_tflush + + /* bridge-specific widget configuration 0x000058-0x00007F */ + bridgereg_t _pad_000058; + bridgereg_t b_wid_aux_err; /* 0x00005C */ + bridgereg_t _pad_000060; + bridgereg_t b_wid_resp_upper; /* 0x000064 */ + bridgereg_t _pad_000068; + bridgereg_t b_wid_resp_lower; /* 0x00006C */ + bridgereg_t _pad_000070; + bridgereg_t b_wid_tst_pin_ctrl; /* 0x000074 */ + bridgereg_t _pad_000078[2]; + + /* PMU & Map 0x000080-0x00008F */ + bridgereg_t _pad_000080; + bridgereg_t b_dir_map; /* 0x000084 */ + bridgereg_t _pad_000088[2]; + + /* SSRAM 0x000090-0x00009F */ + bridgereg_t _pad_000090; + bridgereg_t b_ram_perr; /* 0x000094 */ + bridgereg_t _pad_000098[2]; + + /* Arbitration 0x0000A0-0x0000AF */ + bridgereg_t _pad_0000A0; + bridgereg_t b_arb; /* 0x0000A4 */ + bridgereg_t _pad_0000A8[2]; + + /* Number In A Can 0x0000B0-0x0000BF */ + bridgereg_t _pad_0000B0; + bridgereg_t b_nic; /* 0x0000B4 */ + bridgereg_t _pad_0000B8[2]; + + /* PCI/GIO 0x0000C0-0x0000FF */ + bridgereg_t _pad_0000C0; + bridgereg_t b_bus_timeout; /* 0x0000C4 */ +#define b_pci_bus_timeout b_bus_timeout + + bridgereg_t _pad_0000C8; + bridgereg_t b_pci_cfg; /* 0x0000CC */ + bridgereg_t _pad_0000D0; + bridgereg_t b_pci_err_upper; /* 0x0000D4 */ + bridgereg_t _pad_0000D8; + bridgereg_t b_pci_err_lower; /* 0x0000DC */ + bridgereg_t _pad_0000E0[8]; +#define b_gio_err_lower b_pci_err_lower 
+#define b_gio_err_upper b_pci_err_upper + + /* Interrupt 0x000100-0x0001FF */ + bridgereg_t _pad_000100; + bridgereg_t b_int_status; /* 0x000104 */ + bridgereg_t _pad_000108; + bridgereg_t b_int_enable; /* 0x00010C */ + bridgereg_t _pad_000110; + bridgereg_t b_int_rst_stat; /* 0x000114 */ + bridgereg_t _pad_000118; + bridgereg_t b_int_mode; /* 0x00011C */ + bridgereg_t _pad_000120; + bridgereg_t b_int_device; /* 0x000124 */ + bridgereg_t _pad_000128; + bridgereg_t b_int_host_err; /* 0x00012C */ + + struct { + bridgereg_t __pad; /* 0x0001{30,,,68} */ + bridgereg_t addr; /* 0x0001{34,,,6C} */ + } b_int_addr[8]; /* 0x000130 */ + + bridgereg_t _pad_000170[36]; + + /* Device 0x000200-0x0003FF */ + struct { + bridgereg_t __pad; /* 0x0002{00,,,38} */ + bridgereg_t reg; /* 0x0002{04,,,3C} */ + } b_device[8]; /* 0x000200 */ + + struct { + bridgereg_t __pad; /* 0x0002{40,,,78} */ + bridgereg_t reg; /* 0x0002{44,,,7C} */ + } b_wr_req_buf[8]; /* 0x000240 */ + + struct { + bridgereg_t __pad; /* 0x0002{80,,,88} */ + bridgereg_t reg; /* 0x0002{84,,,8C} */ + } b_rrb_map[2]; /* 0x000280 */ +#define b_even_resp b_rrb_map[0].reg /* 0x000284 */ +#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */ + + bridgereg_t _pad_000290; + bridgereg_t b_resp_status; /* 0x000294 */ + bridgereg_t _pad_000298; + bridgereg_t b_resp_clear; /* 0x00029C */ + + bridgereg_t _pad_0002A0[24]; + + char _pad_000300[0x10000 - 0x000300]; + + /* Internal Address Translation Entry RAM 0x010000-0x0103FF */ + union { + bridge_ate_t wr; /* write-only */ + struct { + bridgereg_t _p_pad; + bridgereg_t rd; /* read-only */ + } hi; + } b_int_ate_ram[128]; + + char _pad_010400[0x11000 - 0x010400]; + + /* Internal Address Translation Entry RAM LOW 0x011000-0x0113FF */ + struct { + bridgereg_t _p_pad; + bridgereg_t rd; /* read-only */ + } b_int_ate_ram_lo[128]; + + char _pad_011400[0x20000 - 0x011400]; + + /* PCI Device Configuration Spaces 0x020000-0x027FFF */ + union { /* make all access sizes available. */ + u8 c[0x1000 / 1]; + u16 s[0x1000 / 2]; + u32 l[0x1000 / 4]; + u64 d[0x1000 / 8]; + union { + u8 c[0x100 / 1]; + u16 s[0x100 / 2]; + u32 l[0x100 / 4]; + u64 d[0x100 / 8]; + } f[8]; + } b_type0_cfg_dev[8]; /* 0x020000 */ + + /* PCI Type 1 Configuration Space 0x028000-0x028FFF */ + union { /* make all access sizes available. */ + u8 c[0x1000 / 1]; + u16 s[0x1000 / 2]; + u32 l[0x1000 / 4]; + u64 d[0x1000 / 8]; + } b_type1_cfg; /* 0x028000-0x029000 */ + + char _pad_029000[0x007000]; /* 0x029000-0x030000 */ + + /* PCI Interrupt Acknowledge Cycle 0x030000 */ + union { + u8 c[8 / 1]; + u16 s[8 / 2]; + u32 l[8 / 4]; + u64 d[8 / 8]; + } b_pci_iack; /* 0x030000 */ + + u8 _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */ + + /* External Address Translation Entry RAM 0x080000-0x0FFFFF */ + bridge_ate_t b_ext_ate_ram[0x10000]; + + /* Reserved 0x100000-0x1FFFFF */ + char _pad_100000[0x200000-0x100000]; + + /* PCI/GIO Device Spaces 0x200000-0xBFFFFF */ + union { /* make all access sizes available. */ + u8 c[0x100000 / 1]; + u16 s[0x100000 / 2]; + u32 l[0x100000 / 4]; + u64 d[0x100000 / 8]; + } b_devio_raw[10]; /* 0x200000 */ + + /* b_devio macro is a bit strange; it reflects the + * fact that the Bridge ASIC provides 2M for the + * first two DevIO windows and 1M for the other six. + */ +#define b_devio(n) b_devio_raw[((n)<2)?(n*2):(n+2)] + + /* External Flash Proms 1,0 0xC00000-0xFFFFFF */ + union { /* make all access sizes available. 
*/ + u8 c[0x400000 / 1]; /* read-only */ + u16 s[0x400000 / 2]; /* read-write */ + u32 l[0x400000 / 4]; /* read-only */ + u64 d[0x400000 / 8]; /* read-only */ + } b_external_flash; /* 0xC00000 */ +} bridge_t; + +/* + * Field formats for Error Command Word and Auxillary Error Command Word + * of bridge. + */ +typedef struct bridge_err_cmdword_s { + union { + u32 cmd_word; + struct { + u32 didn:4, /* Destination ID */ + sidn:4, /* Source ID */ + pactyp:4, /* Packet type */ + tnum:5, /* Trans Number */ + coh:1, /* Coh Transacti */ + ds:2, /* Data size */ + gbr:1, /* GBR enable */ + vbpm:1, /* VBPM message */ + error:1, /* Error occurred */ + barr:1, /* Barrier op */ + rsvd:8; + } berr_st; + } berr_un; +} bridge_err_cmdword_t; + +#define berr_field berr_un.berr_st +#endif /* !__ASSEMBLY__ */ + +/* + * The values of these macros can and should be crosschecked + * regularly against the offsets of the like-named fields + * within the "bridge_t" structure above. + */ + +/* Byte offset macros for Bridge internal registers */ + +#define BRIDGE_WID_ID WIDGET_ID +#define BRIDGE_WID_STAT WIDGET_STATUS +#define BRIDGE_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR +#define BRIDGE_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR +#define BRIDGE_WID_CONTROL WIDGET_CONTROL +#define BRIDGE_WID_REQ_TIMEOUT WIDGET_REQ_TIMEOUT +#define BRIDGE_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR +#define BRIDGE_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR +#define BRIDGE_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD +#define BRIDGE_WID_LLP WIDGET_LLP_CFG +#define BRIDGE_WID_TFLUSH WIDGET_TFLUSH + +#define BRIDGE_WID_AUX_ERR 0x00005C /* Aux Error Command Word */ +#define BRIDGE_WID_RESP_UPPER 0x000064 /* Response Buf Upper Addr */ +#define BRIDGE_WID_RESP_LOWER 0x00006C /* Response Buf Lower Addr */ +#define BRIDGE_WID_TST_PIN_CTRL 0x000074 /* Test pin control */ + +#define BRIDGE_DIR_MAP 0x000084 /* Direct Map reg */ + +#define BRIDGE_RAM_PERR 0x000094 /* SSRAM Parity Error */ + +#define BRIDGE_ARB 0x0000A4 /* Arbitration Priority reg */ + +#define BRIDGE_NIC 0x0000B4 /* Number In A Can */ + +#define BRIDGE_BUS_TIMEOUT 0x0000C4 /* Bus Timeout Register */ +#define BRIDGE_PCI_BUS_TIMEOUT BRIDGE_BUS_TIMEOUT +#define BRIDGE_PCI_CFG 0x0000CC /* PCI Type 1 Config reg */ +#define BRIDGE_PCI_ERR_UPPER 0x0000D4 /* PCI error Upper Addr */ +#define BRIDGE_PCI_ERR_LOWER 0x0000DC /* PCI error Lower Addr */ + +#define BRIDGE_INT_STATUS 0x000104 /* Interrupt Status */ +#define BRIDGE_INT_ENABLE 0x00010C /* Interrupt Enables */ +#define BRIDGE_INT_RST_STAT 0x000114 /* Reset Intr Status */ +#define BRIDGE_INT_MODE 0x00011C /* Interrupt Mode */ +#define BRIDGE_INT_DEVICE 0x000124 /* Interrupt Device */ +#define BRIDGE_INT_HOST_ERR 0x00012C /* Host Error Field */ + +#define BRIDGE_INT_ADDR0 0x000134 /* Host Address Reg */ +#define BRIDGE_INT_ADDR_OFF 0x000008 /* Host Addr offset (1..7) */ +#define BRIDGE_INT_ADDR(x) (BRIDGE_INT_ADDR0+(x)*BRIDGE_INT_ADDR_OFF) + +#define BRIDGE_DEVICE0 0x000204 /* Device 0 */ +#define BRIDGE_DEVICE_OFF 0x000008 /* Device offset (1..7) */ +#define BRIDGE_DEVICE(x) (BRIDGE_DEVICE0+(x)*BRIDGE_DEVICE_OFF) + +#define BRIDGE_WR_REQ_BUF0 0x000244 /* Write Request Buffer 0 */ +#define BRIDGE_WR_REQ_BUF_OFF 0x000008 /* Buffer Offset (1..7) */ +#define BRIDGE_WR_REQ_BUF(x) (BRIDGE_WR_REQ_BUF0+(x)*BRIDGE_WR_REQ_BUF_OFF) + +#define BRIDGE_EVEN_RESP 0x000284 /* Even Device Response Buf */ +#define BRIDGE_ODD_RESP 0x00028C /* Odd Device Response Buf */ + +#define BRIDGE_RESP_STATUS 0x000294 /* Read Response Status reg */ +#define BRIDGE_RESP_CLEAR 0x00029C /* 
Read Response Clear reg */ + +/* Byte offset macros for Bridge I/O space */ + +#define BRIDGE_ATE_RAM 0x00010000 /* Internal Addr Xlat Ram */ + +#define BRIDGE_TYPE0_CFG_DEV0 0x00020000 /* Type 0 Cfg, Device 0 */ +#define BRIDGE_TYPE0_CFG_SLOT_OFF 0x00001000 /* Type 0 Cfg Slot Offset (1..7) */ +#define BRIDGE_TYPE0_CFG_FUNC_OFF 0x00000100 /* Type 0 Cfg Func Offset (1..7) */ +#define BRIDGE_TYPE0_CFG_DEV(s) (BRIDGE_TYPE0_CFG_DEV0+\ + (s)*BRIDGE_TYPE0_CFG_SLOT_OFF) +#define BRIDGE_TYPE0_CFG_DEVF(s, f) (BRIDGE_TYPE0_CFG_DEV0+\ + (s)*BRIDGE_TYPE0_CFG_SLOT_OFF+\ + (f)*BRIDGE_TYPE0_CFG_FUNC_OFF) + +#define BRIDGE_TYPE1_CFG 0x00028000 /* Type 1 Cfg space */ + +#define BRIDGE_PCI_IACK 0x00030000 /* PCI Interrupt Ack */ +#define BRIDGE_EXT_SSRAM 0x00080000 /* Extern SSRAM (ATE) */ + +/* Byte offset macros for Bridge device IO spaces */ + +#define BRIDGE_DEV_CNT 8 /* Up to 8 devices per bridge */ +#define BRIDGE_DEVIO0 0x00200000 /* Device IO 0 Addr */ +#define BRIDGE_DEVIO1 0x00400000 /* Device IO 1 Addr */ +#define BRIDGE_DEVIO2 0x00600000 /* Device IO 2 Addr */ +#define BRIDGE_DEVIO_OFF 0x00100000 /* Device IO Offset (3..7) */ + +#define BRIDGE_DEVIO_2MB 0x00200000 /* Device IO Offset (0..1) */ +#define BRIDGE_DEVIO_1MB 0x00100000 /* Device IO Offset (2..7) */ + +#define BRIDGE_DEVIO(x) ((x)<=1 ? BRIDGE_DEVIO0+(x)*BRIDGE_DEVIO_2MB : BRIDGE_DEVIO2+((x)-2)*BRIDGE_DEVIO_1MB) + +#define BRIDGE_EXTERNAL_FLASH 0x00C00000 /* External Flash PROMS */ + +/* ======================================================================== + * Bridge register bit field definitions + */ + +/* Widget part number of bridge */ +#define BRIDGE_WIDGET_PART_NUM 0xc002 +#define XBRIDGE_WIDGET_PART_NUM 0xd002 + +/* Manufacturer of bridge */ +#define BRIDGE_WIDGET_MFGR_NUM 0x036 +#define XBRIDGE_WIDGET_MFGR_NUM 0x024 + +/* Revision numbers for known Bridge revisions */ +#define BRIDGE_REV_A 0x1 +#define BRIDGE_REV_B 0x2 +#define BRIDGE_REV_C 0x3 +#define BRIDGE_REV_D 0x4 + +/* Bridge widget status register bits definition */ + +#define BRIDGE_STAT_LLP_REC_CNT (0xFFu << 24) +#define BRIDGE_STAT_LLP_TX_CNT (0xFF << 16) +#define BRIDGE_STAT_FLASH_SELECT (0x1 << 6) +#define BRIDGE_STAT_PCI_GIO_N (0x1 << 5) +#define BRIDGE_STAT_PENDING (0x1F << 0) + +/* Bridge widget control register bits definition */ +#define BRIDGE_CTRL_FLASH_WR_EN (0x1ul << 31) +#define BRIDGE_CTRL_EN_CLK50 (0x1 << 30) +#define BRIDGE_CTRL_EN_CLK40 (0x1 << 29) +#define BRIDGE_CTRL_EN_CLK33 (0x1 << 28) +#define BRIDGE_CTRL_RST(n) ((n) << 24) +#define BRIDGE_CTRL_RST_MASK (BRIDGE_CTRL_RST(0xF)) +#define BRIDGE_CTRL_RST_PIN(x) (BRIDGE_CTRL_RST(0x1 << (x))) +#define BRIDGE_CTRL_IO_SWAP (0x1 << 23) +#define BRIDGE_CTRL_MEM_SWAP (0x1 << 22) +#define BRIDGE_CTRL_PAGE_SIZE (0x1 << 21) +#define BRIDGE_CTRL_SS_PAR_BAD (0x1 << 20) +#define BRIDGE_CTRL_SS_PAR_EN (0x1 << 19) +#define BRIDGE_CTRL_SSRAM_SIZE(n) ((n) << 17) +#define BRIDGE_CTRL_SSRAM_SIZE_MASK (BRIDGE_CTRL_SSRAM_SIZE(0x3)) +#define BRIDGE_CTRL_SSRAM_512K (BRIDGE_CTRL_SSRAM_SIZE(0x3)) +#define BRIDGE_CTRL_SSRAM_128K (BRIDGE_CTRL_SSRAM_SIZE(0x2)) +#define BRIDGE_CTRL_SSRAM_64K (BRIDGE_CTRL_SSRAM_SIZE(0x1)) +#define BRIDGE_CTRL_SSRAM_1K (BRIDGE_CTRL_SSRAM_SIZE(0x0)) +#define BRIDGE_CTRL_F_BAD_PKT (0x1 << 16) +#define BRIDGE_CTRL_LLP_XBAR_CRD(n) ((n) << 12) +#define BRIDGE_CTRL_LLP_XBAR_CRD_MASK (BRIDGE_CTRL_LLP_XBAR_CRD(0xf)) +#define BRIDGE_CTRL_CLR_RLLP_CNT (0x1 << 11) +#define BRIDGE_CTRL_CLR_TLLP_CNT (0x1 << 10) +#define BRIDGE_CTRL_SYS_END (0x1 << 9) +#define BRIDGE_CTRL_MAX_TRANS(n) ((n) << 4) +#define 
BRIDGE_CTRL_MAX_TRANS_MASK (BRIDGE_CTRL_MAX_TRANS(0x1f)) +#define BRIDGE_CTRL_WIDGET_ID(n) ((n) << 0) +#define BRIDGE_CTRL_WIDGET_ID_MASK (BRIDGE_CTRL_WIDGET_ID(0xf)) + +/* Bridge Response buffer Error Upper Register bit fields definition */ +#define BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT (20) +#define BRIDGE_RESP_ERRUPPR_DEVNUM_MASK (0x7 << BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT) +#define BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT (16) +#define BRIDGE_RESP_ERRUPPR_BUFNUM_MASK (0xF << BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT) +#define BRIDGE_RESP_ERRRUPPR_BUFMASK (0xFFFF) + +#define BRIDGE_RESP_ERRUPPR_BUFNUM(x) \ + (((x) & BRIDGE_RESP_ERRUPPR_BUFNUM_MASK) >> \ + BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT) + +#define BRIDGE_RESP_ERRUPPR_DEVICE(x) \ + (((x) & BRIDGE_RESP_ERRUPPR_DEVNUM_MASK) >> \ + BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT) + +/* Bridge direct mapping register bits definition */ +#define BRIDGE_DIRMAP_W_ID_SHFT 20 +#define BRIDGE_DIRMAP_W_ID (0xf << BRIDGE_DIRMAP_W_ID_SHFT) +#define BRIDGE_DIRMAP_RMF_64 (0x1 << 18) +#define BRIDGE_DIRMAP_ADD512 (0x1 << 17) +#define BRIDGE_DIRMAP_OFF (0x1ffff << 0) +#define BRIDGE_DIRMAP_OFF_ADDRSHFT (31) /* lsbit of DIRMAP_OFF is xtalk address bit 31 */ + +/* Bridge Arbitration register bits definition */ +#define BRIDGE_ARB_REQ_WAIT_TICK(x) ((x) << 16) +#define BRIDGE_ARB_REQ_WAIT_TICK_MASK BRIDGE_ARB_REQ_WAIT_TICK(0x3) +#define BRIDGE_ARB_REQ_WAIT_EN(x) ((x) << 8) +#define BRIDGE_ARB_REQ_WAIT_EN_MASK BRIDGE_ARB_REQ_WAIT_EN(0xff) +#define BRIDGE_ARB_FREEZE_GNT (1 << 6) +#define BRIDGE_ARB_HPRI_RING_B2 (1 << 5) +#define BRIDGE_ARB_HPRI_RING_B1 (1 << 4) +#define BRIDGE_ARB_HPRI_RING_B0 (1 << 3) +#define BRIDGE_ARB_LPRI_RING_B2 (1 << 2) +#define BRIDGE_ARB_LPRI_RING_B1 (1 << 1) +#define BRIDGE_ARB_LPRI_RING_B0 (1 << 0) + +/* Bridge Bus time-out register bits definition */ +#define BRIDGE_BUS_PCI_RETRY_HLD(x) ((x) << 16) +#define BRIDGE_BUS_PCI_RETRY_HLD_MASK BRIDGE_BUS_PCI_RETRY_HLD(0x1f) +#define BRIDGE_BUS_GIO_TIMEOUT (1 << 12) +#define BRIDGE_BUS_PCI_RETRY_CNT(x) ((x) << 0) +#define BRIDGE_BUS_PCI_RETRY_MASK BRIDGE_BUS_PCI_RETRY_CNT(0x3ff) + +/* Bridge interrupt status register bits definition */ +#define BRIDGE_ISR_MULTI_ERR (0x1u << 31) +#define BRIDGE_ISR_PMU_ESIZE_FAULT (0x1 << 30) +#define BRIDGE_ISR_UNEXP_RESP (0x1 << 29) +#define BRIDGE_ISR_BAD_XRESP_PKT (0x1 << 28) +#define BRIDGE_ISR_BAD_XREQ_PKT (0x1 << 27) +#define BRIDGE_ISR_RESP_XTLK_ERR (0x1 << 26) +#define BRIDGE_ISR_REQ_XTLK_ERR (0x1 << 25) +#define BRIDGE_ISR_INVLD_ADDR (0x1 << 24) +#define BRIDGE_ISR_UNSUPPORTED_XOP (0x1 << 23) +#define BRIDGE_ISR_XREQ_FIFO_OFLOW (0x1 << 22) +#define BRIDGE_ISR_LLP_REC_SNERR (0x1 << 21) +#define BRIDGE_ISR_LLP_REC_CBERR (0x1 << 20) +#define BRIDGE_ISR_LLP_RCTY (0x1 << 19) +#define BRIDGE_ISR_LLP_TX_RETRY (0x1 << 18) +#define BRIDGE_ISR_LLP_TCTY (0x1 << 17) +#define BRIDGE_ISR_SSRAM_PERR (0x1 << 16) +#define BRIDGE_ISR_PCI_ABORT (0x1 << 15) +#define BRIDGE_ISR_PCI_PARITY (0x1 << 14) +#define BRIDGE_ISR_PCI_SERR (0x1 << 13) +#define BRIDGE_ISR_PCI_PERR (0x1 << 12) +#define BRIDGE_ISR_PCI_MST_TIMEOUT (0x1 << 11) +#define BRIDGE_ISR_GIO_MST_TIMEOUT BRIDGE_ISR_PCI_MST_TIMEOUT +#define BRIDGE_ISR_PCI_RETRY_CNT (0x1 << 10) +#define BRIDGE_ISR_XREAD_REQ_TIMEOUT (0x1 << 9) +#define BRIDGE_ISR_GIO_B_ENBL_ERR (0x1 << 8) +#define BRIDGE_ISR_INT_MSK (0xff << 0) +#define BRIDGE_ISR_INT(x) (0x1 << (x)) + +#define BRIDGE_ISR_LINK_ERROR \ + (BRIDGE_ISR_LLP_REC_SNERR|BRIDGE_ISR_LLP_REC_CBERR| \ + BRIDGE_ISR_LLP_RCTY|BRIDGE_ISR_LLP_TX_RETRY| \ + BRIDGE_ISR_LLP_TCTY) + +#define BRIDGE_ISR_PCIBUS_PIOERR \ + 
(BRIDGE_ISR_PCI_MST_TIMEOUT|BRIDGE_ISR_PCI_ABORT) + +#define BRIDGE_ISR_PCIBUS_ERROR \ + (BRIDGE_ISR_PCIBUS_PIOERR|BRIDGE_ISR_PCI_PERR| \ + BRIDGE_ISR_PCI_SERR|BRIDGE_ISR_PCI_RETRY_CNT| \ + BRIDGE_ISR_PCI_PARITY) + +#define BRIDGE_ISR_XTALK_ERROR \ + (BRIDGE_ISR_XREAD_REQ_TIMEOUT|BRIDGE_ISR_XREQ_FIFO_OFLOW|\ + BRIDGE_ISR_UNSUPPORTED_XOP|BRIDGE_ISR_INVLD_ADDR| \ + BRIDGE_ISR_REQ_XTLK_ERR|BRIDGE_ISR_RESP_XTLK_ERR| \ + BRIDGE_ISR_BAD_XREQ_PKT|BRIDGE_ISR_BAD_XRESP_PKT| \ + BRIDGE_ISR_UNEXP_RESP) + +#define BRIDGE_ISR_ERRORS \ + (BRIDGE_ISR_LINK_ERROR|BRIDGE_ISR_PCIBUS_ERROR| \ + BRIDGE_ISR_XTALK_ERROR|BRIDGE_ISR_SSRAM_PERR| \ + BRIDGE_ISR_PMU_ESIZE_FAULT) + +/* + * List of Errors which are fatal and kill the system + */ +#define BRIDGE_ISR_ERROR_FATAL \ + ((BRIDGE_ISR_XTALK_ERROR & ~BRIDGE_ISR_XREAD_REQ_TIMEOUT)|\ + BRIDGE_ISR_PCI_SERR|BRIDGE_ISR_PCI_PARITY ) + +#define BRIDGE_ISR_ERROR_DUMP \ + (BRIDGE_ISR_PCIBUS_ERROR|BRIDGE_ISR_PMU_ESIZE_FAULT| \ + BRIDGE_ISR_XTALK_ERROR|BRIDGE_ISR_SSRAM_PERR) + +/* Bridge interrupt enable register bits definition */ +#define BRIDGE_IMR_UNEXP_RESP BRIDGE_ISR_UNEXP_RESP +#define BRIDGE_IMR_PMU_ESIZE_FAULT BRIDGE_ISR_PMU_ESIZE_FAULT +#define BRIDGE_IMR_BAD_XRESP_PKT BRIDGE_ISR_BAD_XRESP_PKT +#define BRIDGE_IMR_BAD_XREQ_PKT BRIDGE_ISR_BAD_XREQ_PKT +#define BRIDGE_IMR_RESP_XTLK_ERR BRIDGE_ISR_RESP_XTLK_ERR +#define BRIDGE_IMR_REQ_XTLK_ERR BRIDGE_ISR_REQ_XTLK_ERR +#define BRIDGE_IMR_INVLD_ADDR BRIDGE_ISR_INVLD_ADDR +#define BRIDGE_IMR_UNSUPPORTED_XOP BRIDGE_ISR_UNSUPPORTED_XOP +#define BRIDGE_IMR_XREQ_FIFO_OFLOW BRIDGE_ISR_XREQ_FIFO_OFLOW +#define BRIDGE_IMR_LLP_REC_SNERR BRIDGE_ISR_LLP_REC_SNERR +#define BRIDGE_IMR_LLP_REC_CBERR BRIDGE_ISR_LLP_REC_CBERR +#define BRIDGE_IMR_LLP_RCTY BRIDGE_ISR_LLP_RCTY +#define BRIDGE_IMR_LLP_TX_RETRY BRIDGE_ISR_LLP_TX_RETRY +#define BRIDGE_IMR_LLP_TCTY BRIDGE_ISR_LLP_TCTY +#define BRIDGE_IMR_SSRAM_PERR BRIDGE_ISR_SSRAM_PERR +#define BRIDGE_IMR_PCI_ABORT BRIDGE_ISR_PCI_ABORT +#define BRIDGE_IMR_PCI_PARITY BRIDGE_ISR_PCI_PARITY +#define BRIDGE_IMR_PCI_SERR BRIDGE_ISR_PCI_SERR +#define BRIDGE_IMR_PCI_PERR BRIDGE_ISR_PCI_PERR +#define BRIDGE_IMR_PCI_MST_TIMEOUT BRIDGE_ISR_PCI_MST_TIMEOUT +#define BRIDGE_IMR_GIO_MST_TIMEOUT BRIDGE_ISR_GIO_MST_TIMEOUT +#define BRIDGE_IMR_PCI_RETRY_CNT BRIDGE_ISR_PCI_RETRY_CNT +#define BRIDGE_IMR_XREAD_REQ_TIMEOUT BRIDGE_ISR_XREAD_REQ_TIMEOUT +#define BRIDGE_IMR_GIO_B_ENBL_ERR BRIDGE_ISR_GIO_B_ENBL_ERR +#define BRIDGE_IMR_INT_MSK BRIDGE_ISR_INT_MSK +#define BRIDGE_IMR_INT(x) BRIDGE_ISR_INT(x) + +/* Bridge interrupt reset register bits definition */ +#define BRIDGE_IRR_MULTI_CLR (0x1 << 6) +#define BRIDGE_IRR_CRP_GRP_CLR (0x1 << 5) +#define BRIDGE_IRR_RESP_BUF_GRP_CLR (0x1 << 4) +#define BRIDGE_IRR_REQ_DSP_GRP_CLR (0x1 << 3) +#define BRIDGE_IRR_LLP_GRP_CLR (0x1 << 2) +#define BRIDGE_IRR_SSRAM_GRP_CLR (0x1 << 1) +#define BRIDGE_IRR_PCI_GRP_CLR (0x1 << 0) +#define BRIDGE_IRR_GIO_GRP_CLR (0x1 << 0) +#define BRIDGE_IRR_ALL_CLR 0x7f + +#define BRIDGE_IRR_CRP_GRP (BRIDGE_ISR_UNEXP_RESP | \ + BRIDGE_ISR_XREQ_FIFO_OFLOW) +#define BRIDGE_IRR_RESP_BUF_GRP (BRIDGE_ISR_BAD_XRESP_PKT | \ + BRIDGE_ISR_RESP_XTLK_ERR | \ + BRIDGE_ISR_XREAD_REQ_TIMEOUT) +#define BRIDGE_IRR_REQ_DSP_GRP (BRIDGE_ISR_UNSUPPORTED_XOP | \ + BRIDGE_ISR_BAD_XREQ_PKT | \ + BRIDGE_ISR_REQ_XTLK_ERR | \ + BRIDGE_ISR_INVLD_ADDR) +#define BRIDGE_IRR_LLP_GRP (BRIDGE_ISR_LLP_REC_SNERR | \ + BRIDGE_ISR_LLP_REC_CBERR | \ + BRIDGE_ISR_LLP_RCTY | \ + BRIDGE_ISR_LLP_TX_RETRY | \ + BRIDGE_ISR_LLP_TCTY) +#define BRIDGE_IRR_SSRAM_GRP (BRIDGE_ISR_SSRAM_PERR | 
\ + BRIDGE_ISR_PMU_ESIZE_FAULT) +#define BRIDGE_IRR_PCI_GRP (BRIDGE_ISR_PCI_ABORT | \ + BRIDGE_ISR_PCI_PARITY | \ + BRIDGE_ISR_PCI_SERR | \ + BRIDGE_ISR_PCI_PERR | \ + BRIDGE_ISR_PCI_MST_TIMEOUT | \ + BRIDGE_ISR_PCI_RETRY_CNT) + +#define BRIDGE_IRR_GIO_GRP (BRIDGE_ISR_GIO_B_ENBL_ERR | \ + BRIDGE_ISR_GIO_MST_TIMEOUT) + +/* Bridge INT_DEV register bits definition */ +#define BRIDGE_INT_DEV_SHFT(n) ((n)*3) +#define BRIDGE_INT_DEV_MASK(n) (0x7 << BRIDGE_INT_DEV_SHFT(n)) +#define BRIDGE_INT_DEV_SET(_dev, _line) (_dev << BRIDGE_INT_DEV_SHFT(_line)) + +/* Bridge interrupt(x) register bits definition */ +#define BRIDGE_INT_ADDR_HOST 0x0003FF00 +#define BRIDGE_INT_ADDR_FLD 0x000000FF + +#define BRIDGE_TMO_PCI_RETRY_HLD_MASK 0x1f0000 +#define BRIDGE_TMO_GIO_TIMEOUT_MASK 0x001000 +#define BRIDGE_TMO_PCI_RETRY_CNT_MASK 0x0003ff + +#define BRIDGE_TMO_PCI_RETRY_CNT_MAX 0x3ff + +/* + * The NASID should be shifted by this amount and stored into the + * interrupt(x) register. + */ +#define BRIDGE_INT_ADDR_NASID_SHFT 8 + +/* + * The BRIDGE_INT_ADDR_DEST_IO bit should be set to send an interrupt to + * memory. + */ +#define BRIDGE_INT_ADDR_DEST_IO (1 << 17) +#define BRIDGE_INT_ADDR_DEST_MEM 0 +#define BRIDGE_INT_ADDR_MASK (1 << 17) + +/* Bridge device(x) register bits definition */ +#define BRIDGE_DEV_ERR_LOCK_EN 0x10000000 +#define BRIDGE_DEV_PAGE_CHK_DIS 0x08000000 +#define BRIDGE_DEV_FORCE_PCI_PAR 0x04000000 +#define BRIDGE_DEV_VIRTUAL_EN 0x02000000 +#define BRIDGE_DEV_PMU_WRGA_EN 0x01000000 +#define BRIDGE_DEV_DIR_WRGA_EN 0x00800000 +#define BRIDGE_DEV_DEV_SIZE 0x00400000 +#define BRIDGE_DEV_RT 0x00200000 +#define BRIDGE_DEV_SWAP_PMU 0x00100000 +#define BRIDGE_DEV_SWAP_DIR 0x00080000 +#define BRIDGE_DEV_PREF 0x00040000 +#define BRIDGE_DEV_PRECISE 0x00020000 +#define BRIDGE_DEV_COH 0x00010000 +#define BRIDGE_DEV_BARRIER 0x00008000 +#define BRIDGE_DEV_GBR 0x00004000 +#define BRIDGE_DEV_DEV_SWAP 0x00002000 +#define BRIDGE_DEV_DEV_IO_MEM 0x00001000 +#define BRIDGE_DEV_OFF_MASK 0x00000fff +#define BRIDGE_DEV_OFF_ADDR_SHFT 20 + +#define BRIDGE_DEV_PMU_BITS (BRIDGE_DEV_PMU_WRGA_EN | \ + BRIDGE_DEV_SWAP_PMU) +#define BRIDGE_DEV_D32_BITS (BRIDGE_DEV_DIR_WRGA_EN | \ + BRIDGE_DEV_SWAP_DIR | \ + BRIDGE_DEV_PREF | \ + BRIDGE_DEV_PRECISE | \ + BRIDGE_DEV_COH | \ + BRIDGE_DEV_BARRIER) +#define BRIDGE_DEV_D64_BITS (BRIDGE_DEV_DIR_WRGA_EN | \ + BRIDGE_DEV_SWAP_DIR | \ + BRIDGE_DEV_COH | \ + BRIDGE_DEV_BARRIER) + +/* Bridge Error Upper register bit field definition */ +#define BRIDGE_ERRUPPR_DEVMASTER (0x1 << 20) /* Device was master */ +#define BRIDGE_ERRUPPR_PCIVDEV (0x1 << 19) /* Virtual Req value */ +#define BRIDGE_ERRUPPR_DEVNUM_SHFT (16) +#define BRIDGE_ERRUPPR_DEVNUM_MASK (0x7 << BRIDGE_ERRUPPR_DEVNUM_SHFT) +#define BRIDGE_ERRUPPR_DEVICE(err) (((err) >> BRIDGE_ERRUPPR_DEVNUM_SHFT) & 0x7) +#define BRIDGE_ERRUPPR_ADDRMASK (0xFFFF) + +/* Bridge interrupt mode register bits definition */ +#define BRIDGE_INTMODE_CLR_PKT_EN(x) (0x1 << (x)) + +/* this should be written to the xbow's link_control(x) register */ +#define BRIDGE_CREDIT 3 + +/* RRB assignment register */ +#define BRIDGE_RRB_EN 0x8 /* after shifting down */ +#define BRIDGE_RRB_DEV 0x7 /* after shifting down */ +#define BRIDGE_RRB_VDEV 0x4 /* after shifting down */ +#define BRIDGE_RRB_PDEV 0x3 /* after shifting down */ + +/* RRB status register */ +#define BRIDGE_RRB_VALID(r) (0x00010000<<(r)) +#define BRIDGE_RRB_INUSE(r) (0x00000001<<(r)) + +/* RRB clear register */ +#define BRIDGE_RRB_CLEAR(r) (0x00000001<<(r)) + +/* xbox system controller declarations */ 
+#define XBOX_BRIDGE_WID 8 +#define FLASH_PROM1_BASE 0xE00000 /* To read the xbox sysctlr status */ +#define XBOX_RPS_EXISTS 1 << 6 /* RPS bit in status register */ +#define XBOX_RPS_FAIL 1 << 4 /* RPS status bit in register */ + +/* ======================================================================== + */ +/* + * Macros for Xtalk to Bridge bus (PCI/GIO) PIO + * refer to section 4.2.1 of Bridge Spec for xtalk to PCI/GIO PIO mappings + */ +/* XTALK addresses that map into Bridge Bus addr space */ +#define BRIDGE_PIO32_XTALK_ALIAS_BASE 0x000040000000L +#define BRIDGE_PIO32_XTALK_ALIAS_LIMIT 0x00007FFFFFFFL +#define BRIDGE_PIO64_XTALK_ALIAS_BASE 0x000080000000L +#define BRIDGE_PIO64_XTALK_ALIAS_LIMIT 0x0000BFFFFFFFL +#define BRIDGE_PCIIO_XTALK_ALIAS_BASE 0x000100000000L +#define BRIDGE_PCIIO_XTALK_ALIAS_LIMIT 0x0001FFFFFFFFL + +/* Ranges of PCI bus space that can be accessed via PIO from xtalk */ +#define BRIDGE_MIN_PIO_ADDR_MEM 0x00000000 /* 1G PCI memory space */ +#define BRIDGE_MAX_PIO_ADDR_MEM 0x3fffffff +#define BRIDGE_MIN_PIO_ADDR_IO 0x00000000 /* 4G PCI IO space */ +#define BRIDGE_MAX_PIO_ADDR_IO 0xffffffff + +/* XTALK addresses that map into PCI addresses */ +#define BRIDGE_PCI_MEM32_BASE BRIDGE_PIO32_XTALK_ALIAS_BASE +#define BRIDGE_PCI_MEM32_LIMIT BRIDGE_PIO32_XTALK_ALIAS_LIMIT +#define BRIDGE_PCI_MEM64_BASE BRIDGE_PIO64_XTALK_ALIAS_BASE +#define BRIDGE_PCI_MEM64_LIMIT BRIDGE_PIO64_XTALK_ALIAS_LIMIT +#define BRIDGE_PCI_IO_BASE BRIDGE_PCIIO_XTALK_ALIAS_BASE +#define BRIDGE_PCI_IO_LIMIT BRIDGE_PCIIO_XTALK_ALIAS_LIMIT + +/* + * Macros for Bridge bus (PCI/GIO) to Xtalk DMA + */ +/* Bridge Bus DMA addresses */ +#define BRIDGE_LOCAL_BASE 0 +#define BRIDGE_DMA_MAPPED_BASE 0x40000000 +#define BRIDGE_DMA_MAPPED_SIZE 0x40000000 /* 1G Bytes */ +#define BRIDGE_DMA_DIRECT_BASE 0x80000000 +#define BRIDGE_DMA_DIRECT_SIZE 0x80000000 /* 2G Bytes */ + +#define PCI32_LOCAL_BASE BRIDGE_LOCAL_BASE + +/* PCI addresses of regions decoded by Bridge for DMA */ +#define PCI32_MAPPED_BASE BRIDGE_DMA_MAPPED_BASE +#define PCI32_DIRECT_BASE BRIDGE_DMA_DIRECT_BASE + +#define IS_PCI32_LOCAL(x) ((ulong_t)(x) < PCI32_MAPPED_BASE) +#define IS_PCI32_MAPPED(x) ((ulong_t)(x) < PCI32_DIRECT_BASE && \ + (ulong_t)(x) >= PCI32_MAPPED_BASE) +#define IS_PCI32_DIRECT(x) ((ulong_t)(x) >= PCI32_MAPPED_BASE) +#define IS_PCI64(x) ((ulong_t)(x) >= PCI64_BASE) + +/* + * The GIO address space. 
+ */ +/* Xtalk to GIO PIO */ +#define BRIDGE_GIO_MEM32_BASE BRIDGE_PIO32_XTALK_ALIAS_BASE +#define BRIDGE_GIO_MEM32_LIMIT BRIDGE_PIO32_XTALK_ALIAS_LIMIT + +#define GIO_LOCAL_BASE BRIDGE_LOCAL_BASE + +/* GIO addresses of regions decoded by Bridge for DMA */ +#define GIO_MAPPED_BASE BRIDGE_DMA_MAPPED_BASE +#define GIO_DIRECT_BASE BRIDGE_DMA_DIRECT_BASE + +#define IS_GIO_LOCAL(x) ((ulong_t)(x) < GIO_MAPPED_BASE) +#define IS_GIO_MAPPED(x) ((ulong_t)(x) < GIO_DIRECT_BASE && \ + (ulong_t)(x) >= GIO_MAPPED_BASE) +#define IS_GIO_DIRECT(x) ((ulong_t)(x) >= GIO_MAPPED_BASE) + +/* PCI to xtalk mapping */ + +/* given a DIR_OFF value and a pci/gio 32 bits direct address, determine + * which xtalk address is accessed + */ +#define BRIDGE_DIRECT_32_SEG_SIZE BRIDGE_DMA_DIRECT_SIZE +#define BRIDGE_DIRECT_32_TO_XTALK(dir_off,adr) \ + ((dir_off) * BRIDGE_DIRECT_32_SEG_SIZE + \ + ((adr) & (BRIDGE_DIRECT_32_SEG_SIZE - 1)) + PHYS_RAMBASE) + +/* 64-bit address attribute masks */ +#define PCI64_ATTR_TARG_MASK 0xf000000000000000 +#define PCI64_ATTR_TARG_SHFT 60 +#define PCI64_ATTR_PREF 0x0800000000000000 +#define PCI64_ATTR_PREC 0x0400000000000000 +#define PCI64_ATTR_VIRTUAL 0x0200000000000000 +#define PCI64_ATTR_BAR 0x0100000000000000 +#define PCI64_ATTR_RMF_MASK 0x00ff000000000000 +#define PCI64_ATTR_RMF_SHFT 48 + +#ifndef __ASSEMBLY__ +/* Address translation entry for mapped pci32 accesses */ +typedef union ate_u { + u64 ent; + struct ate_s { + u64 rmf:16; + u64 addr:36; + u64 targ:4; + u64 reserved:3; + u64 barrier:1; + u64 prefetch:1; + u64 precise:1; + u64 coherent:1; + u64 valid:1; + } field; +} ate_t; +#endif /* !__ASSEMBLY__ */ + +#define ATE_V 0x01 +#define ATE_CO 0x02 +#define ATE_PREC 0x04 +#define ATE_PREF 0x08 +#define ATE_BAR 0x10 + +#define ATE_PFNSHIFT 12 +#define ATE_TIDSHIFT 8 +#define ATE_RMFSHIFT 48 + +#define mkate(xaddr, xid, attr) ((xaddr) & 0x0000fffffffff000ULL) | \ + ((xid)<<ATE_TIDSHIFT) | \ + (attr) + +#define BRIDGE_INTERNAL_ATES 128 + +struct bridge_controller { + struct pci_controller pc; + struct resource mem; + struct resource io; + bridge_t *base; + nasid_t nasid; + unsigned int widget_id; + unsigned int irq_cpu; + dma64_addr_t baddr; + unsigned int pci_int[8]; +}; + +#define BRIDGE_CONTROLLER(bus) \ + ((struct bridge_controller *)((bus)->sysdata)) + +extern void register_bridge_irq(unsigned int irq); +extern int request_bridge_irq(struct bridge_controller *bc); + +extern struct pci_ops bridge_pci_ops; + +#endif /* _ASM_PCI_BRIDGE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/percpu.h b/target/linux/realtek/files/arch/rlx/include/asm/percpu.h new file mode 100644 index 000000000..844e763e9 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/percpu.h @@ -0,0 +1,6 @@ +#ifndef __ASM_PERCPU_H +#define __ASM_PERCPU_H + +#include <asm-generic/percpu.h> + +#endif /* __ASM_PERCPU_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/pgalloc.h b/target/linux/realtek/files/arch/rlx/include/asm/pgalloc.h new file mode 100644 index 000000000..15c206caa --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/pgalloc.h @@ -0,0 +1,110 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle + * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. 
+ */ +#ifndef _ASM_PGALLOC_H +#define _ASM_PGALLOC_H + +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/sched.h> + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + set_pmd(pmd, __pmd((unsigned long)pte)); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte) +{ + set_pmd(pmd, __pmd((unsigned long)page_address(pte))); +} +#define pmd_pgtable(pmd) pmd_page(pmd) + +/* + * Initialize a new pmd table with invalid pointers. + */ +extern void pmd_init(unsigned long page, unsigned long pagetable); + +/* + * Initialize a new pgd / pmd table with invalid pointers. + */ +extern void pgd_init(unsigned long page); + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + if (ret) { + init = pgd_offset(&init_mm, 0UL); + pgd_init((unsigned long)ret); + memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + } + + return ret; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_pages((unsigned long)pgd, PGD_ORDER); +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + pte_t *pte; + + pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER); + + return pte; +} + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *pte; + + pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); + if (pte) { + clear_highpage(pte); + pgtable_page_ctor(pte); + } + return pte; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_pages((unsigned long)pte, PTE_ORDER); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + pgtable_page_dtor(pte); + __free_pages(pte, PTE_ORDER); +} + +#define __pte_free_tlb(tlb,pte) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb), pte); \ +} while (0) + +/* + * allocating and freeing a pmd is trivial: the 1-entry pmd is + * inside the pgd, so has no extra memory associated with it. + */ +#define pmd_free(mm, x) do { } while (0) +#define __pmd_free_tlb(tlb, x) do { } while (0) + +#define check_pgt_cache() do { } while (0) + +extern void pagetable_init(void); + +#endif /* _ASM_PGALLOC_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/pgtable-32.h b/target/linux/realtek/files/arch/rlx/include/asm/pgtable-32.h new file mode 100644 index 000000000..f2a1751ef --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/pgtable-32.h @@ -0,0 +1,159 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle + * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. + */ +#ifndef _ASM_PGTABLE_32_H +#define _ASM_PGTABLE_32_H + +#include <asm/addrspace.h> +#include <asm/page.h> + +#include <linux/linkage.h> +#include <asm/cachectl.h> +#include <asm/fixmap.h> + +#include <asm-generic/pgtable-nopmd.h> + +/* + * - add_wired_entry() add a fixed TLB entry, and move wired register + */ +extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, + unsigned long entryhi, unsigned long pagemask); + +/* + * - add_temporary_entry() add a temporary TLB entry. We use TLB entries + * starting at the top and working down. 
This is for populating the + * TLB before trap_init() puts the TLB miss handler in place. It + * should be used only for entries matching the actual page tables, + * to prevent inconsistencies. + */ +extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, + unsigned long entryhi, unsigned long pagemask); + + +/* Basically we have the same two-level (which is the logical three level + * Linux page table layout folded) page tables as the i386. Some day + * when we have proper page coloring support we can have a 1% quicker + * tlb refill handling mechanism, but for now it is a bit slower but + * works even with the cache aliasing problem the R4k and above have. + */ + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ +#define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * Entries per page directory level: we use two-level, so + * we don't really have any PUD/PMD directory physically. + */ +#define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2) +#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0) +#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PMD_ORDER 1 +#define PTE_ORDER 0 + +#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) +#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) + +#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0 + +#define VMALLOC_START MAP_BASE + +#ifdef CONFIG_HIGHMEM +# define PKMAP_BASE (0xfe000000UL) +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) +#else +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) +#endif + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +extern void load_pgd(unsigned long pg_dir); + +extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)]; + +/* + * Empty pgd/pmd entries point to the invalid_pte_table. + */ +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == (unsigned long) invalid_pte_table; +} + +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) + +static inline int pmd_present(pmd_t pmd) +{ + return pmd_val(pmd) != (unsigned long) invalid_pte_table; +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); +} + +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +#define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) +#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#define __pgd_offset(address) pgd_index(address) +#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) + +/* to find an entry in a page-table-directory */ +#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) + +/* Find an entry in the third-level page table.. 
*/ +#define __pte_offset(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) +#define pte_offset_map_nested(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) +#define pte_unmap(pte) ((void)(pte)) +#define pte_unmap_nested(pte) ((void)(pte)) + +/* Swap entries must have VALID bit cleared. */ +#define __swp_type(x) (((x).val >> 10) & 0x1f) +#define __swp_offset(x) ((x).val >> 15) +#define __swp_entry(type,offset) \ + ((swp_entry_t) { ((type) << 10) | ((offset) << 15) }) + +/* + * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range: + */ +#define PTE_FILE_MAX_BITS 28 + +#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \ + (((_pte).pte >> 2 ) & 0x38) | \ + (((_pte).pte >> 10) << 6 )) + +#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \ + (((off) & 0x38) << 2 ) | \ + (((off) >> 6 ) << 10) | \ + _PAGE_FILE }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#endif /* _ASM_PGTABLE_32_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/pgtable-bits.h b/target/linux/realtek/files/arch/rlx/include/asm/pgtable-bits.h new file mode 100644 index 000000000..511421f29 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/pgtable-bits.h @@ -0,0 +1,61 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 2002 by Ralf Baechle + * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. + * Copyright (C) 2002 Maciej W. Rozycki + */ +#ifndef _ASM_PGTABLE_BITS_H +#define _ASM_PGTABLE_BITS_H + + +/* + * Note that we shift the lower 32bits of each EntryLo[01] entry + * 6 bits to the left. That way we can convert the PFN into the + * physical address by a single 'and' operation and gain 6 additional + * bits for storing information which isn't present in a normal + * MIPS page table. + * + * Similar to the Alpha port, we need to keep track of the ref + * and mod bits in software. We have a software "yeah you can read + * from this page" bit, and a hardware one which actually lets the + * process read from the page. On the same token we have a software + * writable bit and the real hardware one which actually lets the + * process write to the page, this keeps a mod bit via the hardware + * dirty bit. + * + * Certain revisions of the R4000 and R5000 have a bug where if a + * certain sequence occurs in the last 3 instructions of an executable + * page, and the following page is not mapped, the cpu can do + * unpredictable things. The code (when it is written) to deal with + * this problem will be in the update_mmu_cache() code for the r4k. 
+ */ +#define _PAGE_PRESENT (1<<0) /* implemented in software */ +#define _PAGE_READ (1<<1) /* implemented in software */ +#define _PAGE_WRITE (1<<2) /* implemented in software */ +#define _PAGE_ACCESSED (1<<3) /* implemented in software */ +#define _PAGE_MODIFIED (1<<4) /* implemented in software */ +#define _PAGE_FILE (1<<4) /* set:pagecache unset:swap */ + +#define _PAGE_GLOBAL (1<<8) +#define _PAGE_VALID (1<<9) +#define _PAGE_SILENT_READ (1<<9) /* synonym */ +#define _PAGE_DIRTY (1<<10) /* The MIPS dirty bit */ +#define _PAGE_SILENT_WRITE (1<<10) +#define _CACHE_UNCACHED (1<<11) +#define _CACHE_MASK (1<<11) + + +/* + * Cache attributes + */ +#define _CACHE_CACHABLE_NONCOHERENT 0 + +#define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED) +#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) + +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) + +#endif /* _ASM_PGTABLE_BITS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/pgtable.h b/target/linux/realtek/files/arch/rlx/include/asm/pgtable.h new file mode 100644 index 000000000..c3a03b5ec --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/pgtable.h @@ -0,0 +1,231 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Ralf Baechle + */ +#ifndef _ASM_PGTABLE_H +#define _ASM_PGTABLE_H + +#include <asm/pgtable-32.h> + +#include <asm/io.h> +#include <asm/pgtable-bits.h> + +struct mm_struct; +struct vm_area_struct; + +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _page_cachable_default) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ + _page_cachable_default) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ + _page_cachable_default) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _page_cachable_default) +#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _page_cachable_default) +#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ + __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED) + +/* + * MIPS can't do page protection for execute, and considers that the same like + * read. Also, write permissions imply read permissions. This is the closest + * we can get by reasonable means.. + */ + +/* + * Dummy values to fill the table in mmap.c + * The real values will be generated at runtime + */ +#define __P000 __pgprot(0) +#define __P001 __pgprot(0) +#define __P010 __pgprot(0) +#define __P011 __pgprot(0) +#define __P100 __pgprot(0) +#define __P101 __pgprot(0) +#define __P110 __pgprot(0) +#define __P111 __pgprot(0) + +#define __S000 __pgprot(0) +#define __S001 __pgprot(0) +#define __S010 __pgprot(0) +#define __S011 __pgprot(0) +#define __S100 __pgprot(0) +#define __S101 __pgprot(0) +#define __S110 __pgprot(0) +#define __S111 __pgprot(0) + +extern unsigned long _page_cachable_default; + +/* + * ZERO_PAGE is a global shared page that is always zero; used + * for zero-mapped memory areas etc.. 
+ */ + +extern unsigned long empty_zero_page; +extern unsigned long zero_page_mask; + +#define ZERO_PAGE(vaddr) \ + (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))) + +extern void paging_init(void); + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ +#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd)) +#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) +#define pmd_page_vaddr(pmd) pmd_val(pmd) + +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) + +/* + * Certain architectures need to do special things when pte's + * within a page table are directly modified. Thus, the following + * hook is made available. + */ +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; +} +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) + +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + set_pte_at(mm, addr, ptep, __pte(0)); +} + +/* + * (pmds are folded into puds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0) + +#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) +#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1) +#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) + +/* + * We used to declare this array with size but gcc 3.3 and older are not able + * to find that this expression is a constant, so the size is dropped. + */ +extern pgd_t swapper_pg_dir[]; + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_MODIFIED) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + if (pte_val(pte) & _PAGE_READ) + pte_val(pte) |= _PAGE_SILENT_READ; + return pte; +} + +static inline int pte_special(pte_t pte) { return 0; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +/* + * Macro to make mark a page protection value as "uncacheable". Note + * that "protection" is really a misnomer here as the protection value + * contains the memory attribute bits, dirty bits, and various other + * bits as well. 
+ */ +#define pgprot_noncached pgprot_noncached + +static inline pgprot_t pgprot_noncached(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED; + + return __pgprot(prot); +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, + pte_t pte); +extern void __update_cache(struct vm_area_struct *vma, unsigned long address, + pte_t pte); + +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte) +{ + __update_tlb(vma, address, pte); + __update_cache(vma, address, pte); +} + +#define kern_addr_valid(addr) (1) + +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + +#include <asm-generic/pgtable.h> + +/* + * We provide our own get_unmapped area to cope with the virtual aliasing + * constraints placed on us by the cache architecture. + */ +#define HAVE_ARCH_UNMAPPED_AREA + +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) + +#endif /* _ASM_PGTABLE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/pmon.h b/target/linux/realtek/files/arch/rlx/include/asm/pmon.h new file mode 100644 index 000000000..6ad519189 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/pmon.h @@ -0,0 +1,46 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004 by Ralf Baechle + * + * The cpustart method is a PMC-Sierra's function to start the secondary CPU. + * Stock PMON 2000 has the smpfork, semlock and semunlock methods instead. + */ +#ifndef _ASM_PMON_H +#define _ASM_PMON_H + +struct callvectors { + int (*open) (char*, int, int); + int (*close) (int); + int (*read) (int, void*, int); + int (*write) (int, void*, int); + off_t (*lseek) (int, off_t, int); + int (*printf) (const char*, ...); + void (*cacheflush) (void); + char* (*gets) (char*); + union { + int (*smpfork) (unsigned long cp, char *sp); + int (*cpustart) (long, void (*)(void), void *, long); + } _s; + int (*semlock) (int sem); + void (*semunlock) (int sem); +}; + +extern struct callvectors *debug_vectors; + +#define pmon_open(name, flags, mode) debug_vectors->open(name, flage, mode) +#define pmon_close(fd) debug_vectors->close(fd) +#define pmon_read(fd, buf, count) debug_vectors->read(fd, buf, count) +#define pmon_write(fd, buf, count) debug_vectors->write(fd, buf, count) +#define pmon_lseek(fd, off, whence) debug_vectors->lseek(fd, off, whence) +#define pmon_printf(fmt...) 
debug_vectors->printf(fmt) +#define pmon_cacheflush() debug_vectors->cacheflush() +#define pmon_gets(s) debug_vectors->gets(s) +#define pmon_cpustart(n, f, sp, gp) debug_vectors->_s.cpustart(n, f, sp, gp) +#define pmon_smpfork(cp, sp) debug_vectors->_s.smpfork(cp, sp) +#define pmon_semlock(sem) debug_vectors->semlock(sem) +#define pmon_semunlock(sem) debug_vectors->semunlock(sem) + +#endif /* _ASM_PMON_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/poll.h b/target/linux/realtek/files/arch/rlx/include/asm/poll.h new file mode 100644 index 000000000..47b952080 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/poll.h @@ -0,0 +1,9 @@ +#ifndef __ASM_POLL_H +#define __ASM_POLL_H + +#define POLLWRNORM POLLOUT +#define POLLWRBAND 0x0100 + +#include <asm-generic/poll.h> + +#endif /* __ASM_POLL_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/posix_types.h b/target/linux/realtek/files/arch/rlx/include/asm/posix_types.h new file mode 100644 index 000000000..c200102c8 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/posix_types.h @@ -0,0 +1,144 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 97, 98, 99, 2000 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_POSIX_TYPES_H +#define _ASM_POSIX_TYPES_H + +#include <asm/sgidefs.h> + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. + */ + +typedef unsigned long __kernel_ino_t; +typedef unsigned int __kernel_mode_t; +#if (_MIPS_SZLONG == 32) +typedef unsigned long __kernel_nlink_t; +#endif +#if (_MIPS_SZLONG == 64) +typedef unsigned int __kernel_nlink_t; +#endif +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef int __kernel_ipc_pid_t; +typedef unsigned int __kernel_uid_t; +typedef unsigned int __kernel_gid_t; +#if (_MIPS_SZLONG == 32) +typedef unsigned int __kernel_size_t; +typedef int __kernel_ssize_t; +typedef int __kernel_ptrdiff_t; +#endif +#if (_MIPS_SZLONG == 64) +typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; +typedef long __kernel_ptrdiff_t; +#endif +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef long __kernel_daddr_t; +typedef char * __kernel_caddr_t; + +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; +typedef __kernel_uid_t __kernel_old_uid_t; +typedef __kernel_gid_t __kernel_old_gid_t; +typedef unsigned int __kernel_old_dev_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { +#if (_MIPS_SZLONG == 32) + long val[2]; +#endif +#if (_MIPS_SZLONG == 64) + int val[2]; +#endif +} __kernel_fsid_t; + +#if defined(__KERNEL__) + +#undef __FD_SET +static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); +} + +#undef __FD_CLR +static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + 
__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); +} + +#undef __FD_ISSET +static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; +} + +/* + * This will unroll the loop for the normal constant case (8 ints, + * for a 256-bit fd_set) + */ +#undef __FD_ZERO +static __inline__ void __FD_ZERO(__kernel_fd_set *__p) +{ + unsigned long *__tmp = __p->fds_bits; + int __i; + + if (__builtin_constant_p(__FDSET_LONGS)) { + switch (__FDSET_LONGS) { + case 16: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + __tmp[ 4] = 0; __tmp[ 5] = 0; + __tmp[ 6] = 0; __tmp[ 7] = 0; + __tmp[ 8] = 0; __tmp[ 9] = 0; + __tmp[10] = 0; __tmp[11] = 0; + __tmp[12] = 0; __tmp[13] = 0; + __tmp[14] = 0; __tmp[15] = 0; + return; + + case 8: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + __tmp[ 4] = 0; __tmp[ 5] = 0; + __tmp[ 6] = 0; __tmp[ 7] = 0; + return; + + case 4: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + return; + } + } + __i = __FDSET_LONGS; + while (__i) { + __i--; + *__tmp = 0; + __tmp++; + } +} + +#endif /* defined(__KERNEL__) */ + +#endif /* _ASM_POSIX_TYPES_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/processor.h b/target/linux/realtek/files/arch/rlx/include/asm/processor.h new file mode 100644 index 000000000..909c7b868 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/processor.h @@ -0,0 +1,210 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 Waldorf GMBH + * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle + * Copyright (C) 1996 Paul M. Antoine + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_PROCESSOR_H +#define _ASM_PROCESSOR_H + +#include <linux/cpumask.h> +#include <linux/threads.h> + +#include <asm/cachectl.h> +#include <asm/cpu.h> +#include <asm/cpu-info.h> +#include <asm/rlxregs.h> +#include <asm/system.h> + +/* + * Return current * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ __label__ _l; _l: &&_l;}) + +/* + * System setup and hardware flags.. + */ +#ifdef CONFIG_CPU_HAS_SLEEP +#define __cpu_wait() \ +{ \ + __asm__ __volatile__( \ + "sleep \n" \ + "nop \n" \ + "nop \n"); \ +} +#else +#define __cpu_wait() \ +{ \ + __asm__ __volatile__( \ + "nop \n" \ + "nop \n" \ + "nop \n" \ + "nop \n" \ + "nop \n" \ + "nop \n"); \ +} +#endif + + #ifdef CONFIG_ARCH_SUSPEND_POSSIBLE +/* this function is refer to nino_wait() in \linux-2.4.18\arch\mips\philips\nino\power.c */ +static inline void cpu_wait(void) +{ +#ifdef CONFIG_RTL8197B_PANA + extern int cpu_suspend_enabled; + if (!cpu_suspend_enabled) + return; +#endif + +/* + * 08-12-2008, due to the WLAN performance is no difference in 10/100 and giga board when gCpuCanSuspend=1 + * (that means CPU always can suspend), the following code is disabled. 
+ * ==> only apply to RTL865X, it still need to check the WLAN throughput in RTL8196B + */ + +#ifdef CONFIG_RTL_8196C //michaelxxx +#define CONFIG_RTL819X_SUSPEND_CHECK_INTERRUPT +#ifdef CONFIG_RTL819X_SUSPEND_CHECK_INTERRUPT + extern int cpu_can_suspend, cpu_can_suspend_check_init; + extern void suspend_check_interrupt_init(void); + + if (cpu_can_suspend_check_init) { + if (!cpu_can_suspend){ + return; + } + } + else { + suspend_check_interrupt_init(); + cpu_can_suspend_check_init = 1; + } +#endif //CONFIG_RTL819X_SUSPEND_CHECK_INTERRUPT +#endif //CONFIG_RTL8196C + + /* We stop the CPU to conserve power */ + __cpu_wait(); +}; +#endif + +/* + * User space process size: 2GB. This is hardcoded into a few places, + * so don't change it unless you know what you are doing. + */ +#define TASK_SIZE 0x7fff8000UL +#define STACK_TOP TASK_SIZE + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE)) + +#ifdef __KERNEL__ +#define STACK_TOP_MAX TASK_SIZE +#endif + +#define INIT_CPUMASK { \ + {0,} \ +} + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define ARCH_MIN_TASKALIGN 8 + +struct mips_abi; + +/* + * If you change thread_struct remember to change the #defines below too! + */ +struct thread_struct { + /* Saved main processor registers. */ + unsigned long reg16; + unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23; + unsigned long reg29, reg30, reg31; + + /* Saved cp0 stuff. */ + unsigned long cp0_status; + + /* Other stuff associated with the thread. */ + unsigned long cp0_badvaddr; /* Last user fault */ + unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */ + unsigned long error_code; + unsigned long trap_no; + struct mips_abi *abi; +}; + +#define INIT_THREAD { \ + /* \ + * Saved main processor registers \ + */ \ + .reg16 = 0, \ + .reg17 = 0, \ + .reg18 = 0, \ + .reg19 = 0, \ + .reg20 = 0, \ + .reg21 = 0, \ + .reg22 = 0, \ + .reg23 = 0, \ + .reg29 = 0, \ + .reg30 = 0, \ + .reg31 = 0, \ + /* \ + * Saved cp0 stuff \ + */ \ + .cp0_status = 0, \ + /* \ + * Other stuff associated with the process \ + */ \ + .cp0_badvaddr = 0, \ + .cp0_baduaddr = 0, \ + .error_code = 0, \ + .trap_no = 0, \ +} + +struct task_struct; + +/* Free all resources held by a thread. */ +#define release_thread(thread) do { } while(0) + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +extern unsigned long thread_saved_pc(struct task_struct *tsk); + +/* + * Do necessary setup to start up a newly executed thread. + */ +extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp); + +unsigned long get_wchan(struct task_struct *p); + +#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \ + THREAD_SIZE - 32 - sizeof(struct pt_regs)) +#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk)) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) +#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) + +#define cpu_relax() barrier() + +/* + * Return_address is a replacement for __builtin_return_address(count) + * which on certain architectures cannot reasonably be implemented in GCC + * (MIPS, Alpha) or is unuseable with -fomit-frame-pointer (i386). + * Note that __builtin_return_address(x>=1) is forbidden because GCC + * aborts compilation on some CPUs. 
It's simply not possible to unwind + * some CPU's stackframes. + * + * __builtin_return_address works only for non-leaf functions. We avoid the + * overhead of a function call by forcing the compiler to save the return + * address register on the stack. + */ +#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);}) + +#endif /* _ASM_PROCESSOR_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/ptrace.h b/target/linux/realtek/files/arch/rlx/include/asm/ptrace.h new file mode 100644 index 000000000..79054dbee --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/ptrace.h @@ -0,0 +1,97 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_PTRACE_H +#define _ASM_PTRACE_H + +/* 0 - 31 are integer registers, 32 - 63 are fp registers. */ +#define MMHI 32 +#define MMLO 33 +#define PC 34 +#define BADVADDR 35 +#define STATUS 36 +#define CAUSE 37 + +/* + * This struct defines the way the registers are stored on the stack during a + * system call/exception. As usual the registers k0/k1 aren't being saved. + */ +struct pt_regs { + /* Pad bytes for argument save space on the stack. */ + unsigned long pad0[6]; + + /* Saved main processor registers. */ + unsigned long regs[32]; + + /* Saved special registers. */ + unsigned long hi; + unsigned long lo; + unsigned long cp0_epc; + unsigned long cp0_badvaddr; + unsigned long cp0_status; + unsigned long cp0_cause; + +#ifdef CONFIG_CPU_HAS_RADIAX + unsigned long estatus; + unsigned long ecause; + unsigned long intvec; + /* m0 - m3 */ + unsigned long radiax[27]; +#endif + +} __attribute__ ((aligned (8))); + +/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 + +#define PTRACE_OLDSETOPTIONS 21 + +#define PTRACE_GET_THREAD_AREA 25 +#define PTRACE_SET_THREAD_AREA 26 + +/* Calls to trace a 64bit program from a 32bit program. */ +#define PTRACE_PEEKTEXT_3264 0xc0 +#define PTRACE_PEEKDATA_3264 0xc1 +#define PTRACE_POKETEXT_3264 0xc2 +#define PTRACE_POKEDATA_3264 0xc3 +#define PTRACE_GET_THREAD_AREA_3264 0xc4 + +#ifdef __KERNEL__ + +#include <linux/compiler.h> +#include <linux/linkage.h> +#include <linux/types.h> +#include <asm/isadep.h> + +struct task_struct; + +extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); +extern int ptrace_setregs(struct task_struct *child, __s64 __user *data); + +/* + * Does the process account for user or for system time? 
+ */ +#define user_mode(regs) (((regs)->cp0_status & KU_MASK) == KU_USER) + +#define instruction_pointer(regs) ((regs)->cp0_epc) +#define profile_pc(regs) instruction_pointer(regs) + +extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); + +extern NORET_TYPE void die(const char *, const struct pt_regs *) ATTRIB_NORET; + +static inline void die_if_kernel(const char *str, const struct pt_regs *regs) +{ + if (unlikely(!user_mode(regs))) + die(str, regs); +} + +#endif + +#endif /* _ASM_PTRACE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/reboot.h b/target/linux/realtek/files/arch/rlx/include/asm/reboot.h new file mode 100644 index 000000000..e48c0bfab --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/reboot.h @@ -0,0 +1,15 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1997, 1999, 2001, 06 by Ralf Baechle + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#ifndef _ASM_REBOOT_H +#define _ASM_REBOOT_H + +extern void (*_machine_restart)(char *command); +extern void (*_machine_halt)(void); + +#endif /* _ASM_REBOOT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/reg.h b/target/linux/realtek/files/arch/rlx/include/asm/reg.h new file mode 100644 index 000000000..17c64e089 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/reg.h @@ -0,0 +1,68 @@ +/* + * Various register offset definitions for debuggers, core file + * examiners and whatnot. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1999 Ralf Baechle + * Copyright (C) 1995, 1999 Silicon Graphics + */ +#ifndef __ASM_MIPS_REG_H +#define __ASM_MIPS_REG_H + + +#define EF_R0 6 +#define EF_R1 7 +#define EF_R2 8 +#define EF_R3 9 +#define EF_R4 10 +#define EF_R5 11 +#define EF_R6 12 +#define EF_R7 13 +#define EF_R8 14 +#define EF_R9 15 +#define EF_R10 16 +#define EF_R11 17 +#define EF_R12 18 +#define EF_R13 19 +#define EF_R14 20 +#define EF_R15 21 +#define EF_R16 22 +#define EF_R17 23 +#define EF_R18 24 +#define EF_R19 25 +#define EF_R20 26 +#define EF_R21 27 +#define EF_R22 28 +#define EF_R23 29 +#define EF_R24 30 +#define EF_R25 31 + +/* + * k0/k1 unsaved + */ +#define EF_R26 32 +#define EF_R27 33 + +#define EF_R28 34 +#define EF_R29 35 +#define EF_R30 36 +#define EF_R31 37 + +/* + * Saved special registers + */ +#define EF_LO 38 +#define EF_HI 39 + +#define EF_CP0_EPC 40 +#define EF_CP0_BADVADDR 41 +#define EF_CP0_STATUS 42 +#define EF_CP0_CAUSE 43 +#define EF_UNUSED0 44 + +#define EF_SIZE 180 + +#endif /* __ASM_MIPS_REG_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/regdef.h b/target/linux/realtek/files/arch/rlx/include/asm/regdef.h new file mode 100644 index 000000000..f052f1840 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/regdef.h @@ -0,0 +1,53 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1985 MIPS Computer Systems, Inc. + * Copyright (C) 1994, 95, 99, 2003 by Ralf Baechle + * Copyright (C) 1990 - 1992, 1999 Silicon Graphics, Inc. 
+ */ +#ifndef _ASM_REGDEF_H +#define _ASM_REGDEF_H + +#include <asm/sgidefs.h> + +/* + * Symbolic register names for 32 bit ABI + */ +#define zero $0 /* wired zero */ +#define AT $1 /* assembler temp - uppercase because of ".set at" */ +#define v0 $2 /* return value */ +#define v1 $3 +#define a0 $4 /* argument registers */ +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define t0 $8 /* caller saved */ +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 +#define s0 $16 /* callee saved */ +#define s1 $17 +#define s2 $18 +#define s3 $19 +#define s4 $20 +#define s5 $21 +#define s6 $22 +#define s7 $23 +#define t8 $24 /* caller saved */ +#define t9 $25 +#define jp $25 /* PIC jump register */ +#define k0 $26 /* kernel scratch */ +#define k1 $27 +#define gp $28 /* global pointer */ +#define sp $29 /* stack pointer */ +#define fp $30 /* frame pointer */ +#define s8 $30 /* same like fp! */ +#define ra $31 /* return address */ + +#endif /* _ASM_REGDEF_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/resource.h b/target/linux/realtek/files/arch/rlx/include/asm/resource.h new file mode 100644 index 000000000..dde8a4f91 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/resource.h @@ -0,0 +1,33 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 98, 99, 2000 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_RESOURCE_H +#define _ASM_RESOURCE_H + + +/* + * These five resource limit IDs have a MIPS/Linux-specific ordering, + * the rest comes from the generic header: + */ +#define RLIMIT_NOFILE 5 /* max number of open files */ +#define RLIMIT_AS 6 /* address space limit */ +#define RLIMIT_RSS 7 /* max resident set size */ +#define RLIMIT_NPROC 8 /* max number of processes */ +#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */ + +/* + * SuS says limits have to be unsigned. + * Which makes a ton more sense anyway, + * but we keep the old value on MIPS32, + * for compatibility: + */ +#define RLIM_INFINITY 0x7fffffffUL + +#include <asm-generic/resource.h> + +#endif /* _ASM_RESOURCE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/rlxbsp.h b/target/linux/realtek/files/arch/rlx/include/asm/rlxbsp.h new file mode 100644 index 000000000..68b6e2557 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/rlxbsp.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2006, Realtek Semiconductor Corp. + * + * rlxbsp.h: + * board module interface + * + * Tony Wu (tonywu@realtek.com.tw) + * Oct. 30, 2006 + */ + +#ifndef _RLXBSP_H_ +#define _RLXBSP_H_ + +/* + * Function prototypes + */ +#ifndef _LANGUAGE_ASSEMBLY + +#include <linux/linkage.h> + +/* arch/rlx/bsp/serial.c */ +extern void bsp_serial_init(void); + +/* arch/rlx/bsp/time.c */ +extern void bsp_timer_init(void); +extern void bsp_timer_ack(void); + +/* arch/rlx/kernel/time.c */ +extern int rlx_clockevent_init(int irq); + +#endif + +#endif diff --git a/target/linux/realtek/files/arch/rlx/include/asm/rlxregs.h b/target/linux/realtek/files/arch/rlx/include/asm/rlxregs.h new file mode 100644 index 000000000..4b336bb04 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/rlxregs.h @@ -0,0 +1,739 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 1995, 1996, 1997, 2000, 2001 by Ralf Baechle + * Copyright (C) 2000 Silicon Graphics, Inc. + * Modified for further R[236]000 support by Paul M. Antoine, 1996. + * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2003, 2004 Maciej W. Rozycki + */ +#ifndef _ASM_RLXREGS_H_ +#define _ASM_RLXREGS_H_ + +#include <linux/linkage.h> +#include <asm/hazards.h> + +/* + * The following macros are especially useful for __asm__ + * inline assembler. + */ +#ifndef __STR +#define __STR(x) #x +#endif +#ifndef STR +#define STR(x) __STR(x) +#endif + +/* + * Configure language + */ +#ifdef __ASSEMBLY__ +#define _ULCAST_ +#else +#define _ULCAST_ (unsigned long) +#endif + +/* + * Coprocessor 0 register names + */ +#define CP0_INDEX $0 +#define CP0_RANDOM $1 +#define CP0_ENTRYLO0 $2 +#define CP0_ENTRYLO1 $3 +#define CP0_CONF $3 +#define CP0_CONTEXT $4 +#define CP0_PAGEMASK $5 +#define CP0_WIRED $6 +#define CP0_INFO $7 +#define CP0_BADVADDR $8 +#define CP0_COUNT $9 +#define CP0_ENTRYHI $10 +#define CP0_COMPARE $11 +#define CP0_STATUS $12 +#define CP0_CAUSE $13 +#define CP0_EPC $14 +#define CP0_PRID $15 +#define CP0_CONFIG $16 +#define CP0_LLADDR $17 +#define CP0_WATCHLO $18 +#define CP0_WATCHHI $19 +#define CP0_XCONTEXT $20 +#define CP0_FRAMEMASK $21 +#define CP0_DIAGNOSTIC $22 +#define CP0_DEBUG $23 +#define CP0_DEPC $24 +#define CP0_PERFORMANCE $25 +#define CP0_ECC $26 +#define CP0_CACHEERR $27 +#define CP0_TAGLO $28 +#define CP0_TAGHI $29 +#define CP0_ERROREPC $30 +#define CP0_DESAVE $31 + +#define PM_4K 0x00000000 +#define PM_DEFAULT_MASK PM_4K +#define PL_4K 12 + +/* + * Setting c0_status.co enables Hit_Writeback and Hit_Writeback_Invalidate + * cacheops in userspace. This bit exists only on RM7000 and RM9000 + * processors. + */ +#define ST0_CO 0x08000000 + +/* + * Bitfields in the R[23]000 cp0 status register. + */ +#define ST0_IEC 0x00000001 +#define ST0_KUC 0x00000002 +#define ST0_IEP 0x00000004 +#define ST0_KUP 0x00000008 +#define ST0_IEO 0x00000010 +#define ST0_KUO 0x00000020 +/* bits 6 & 7 are reserved on R[23]000 */ +#define ST0_ISC 0x00010000 +#define ST0_SWC 0x00020000 +#define ST0_CM 0x00080000 + +/* + * Status register bits available in all MIPS CPUs. 
+ */ +#define ST0_IM 0x0000ff00 +#define STATUSB_IP0 8 +#define STATUSF_IP0 (_ULCAST_(1) << 8) +#define STATUSB_IP1 9 +#define STATUSF_IP1 (_ULCAST_(1) << 9) +#define STATUSB_IP2 10 +#define STATUSF_IP2 (_ULCAST_(1) << 10) +#define STATUSB_IP3 11 +#define STATUSF_IP3 (_ULCAST_(1) << 11) +#define STATUSB_IP4 12 +#define STATUSF_IP4 (_ULCAST_(1) << 12) +#define STATUSB_IP5 13 +#define STATUSF_IP5 (_ULCAST_(1) << 13) +#define STATUSB_IP6 14 +#define STATUSF_IP6 (_ULCAST_(1) << 14) +#define STATUSB_IP7 15 +#define STATUSF_IP7 (_ULCAST_(1) << 15) +#define STATUSB_IP8 0 +#define STATUSF_IP8 (_ULCAST_(1) << 0) +#define STATUSB_IP9 1 +#define STATUSF_IP9 (_ULCAST_(1) << 1) +#define STATUSB_IP10 2 +#define STATUSF_IP10 (_ULCAST_(1) << 2) +#define STATUSB_IP11 3 +#define STATUSF_IP11 (_ULCAST_(1) << 3) +#define STATUSB_IP12 4 +#define STATUSF_IP12 (_ULCAST_(1) << 4) +#define STATUSB_IP13 5 +#define STATUSF_IP13 (_ULCAST_(1) << 5) +#define STATUSB_IP14 6 +#define STATUSF_IP14 (_ULCAST_(1) << 6) +#define STATUSB_IP15 7 +#define STATUSF_IP15 (_ULCAST_(1) << 7) +#define ST0_CH 0x00040000 +#define ST0_SR 0x00100000 +#define ST0_TS 0x00200000 +#define ST0_BEV 0x00400000 +#define ST0_RE 0x02000000 +#define ST0_FR 0x04000000 +#define ST0_CU 0xf0000000 +#define ST0_CU0 0x10000000 +#define ST0_CU1 0x20000000 +#define ST0_CU2 0x40000000 +#define ST0_CU3 0x80000000 +#define ST0_XX 0x80000000 /* MIPS IV naming */ + +/* + * Bitfields and bit numbers in the coprocessor 0 cause register. + * + * Refer to your MIPS R4xx0 manual, chapter 5 for explanation. + */ +#define CAUSEB_EXCCODE 2 +#define CAUSEF_EXCCODE (_ULCAST_(31) << 2) +#define CAUSEB_IP 8 +#define CAUSEF_IP (_ULCAST_(255) << 8) +#define CAUSEB_IP0 8 +#define CAUSEF_IP0 (_ULCAST_(1) << 8) +#define CAUSEB_IP1 9 +#define CAUSEF_IP1 (_ULCAST_(1) << 9) +#define CAUSEB_IP2 10 +#define CAUSEF_IP2 (_ULCAST_(1) << 10) +#define CAUSEB_IP3 11 +#define CAUSEF_IP3 (_ULCAST_(1) << 11) +#define CAUSEB_IP4 12 +#define CAUSEF_IP4 (_ULCAST_(1) << 12) +#define CAUSEB_IP5 13 +#define CAUSEF_IP5 (_ULCAST_(1) << 13) +#define CAUSEB_IP6 14 +#define CAUSEF_IP6 (_ULCAST_(1) << 14) +#define CAUSEB_IP7 15 +#define CAUSEF_IP7 (_ULCAST_(1) << 15) +#define CAUSEB_IV 23 +#define CAUSEF_IV (_ULCAST_(1) << 23) +#define CAUSEB_CE 28 +#define CAUSEF_CE (_ULCAST_(3) << 28) +#define CAUSEB_BD 31 +#define CAUSEF_BD (_ULCAST_(1) << 31) + +#ifndef __ASSEMBLY__ + +/* + * Macros to access the system control coprocessor + */ + +#if defined(CONFIG_CPU_RLX5281) || defined(CONFIG_CPU_RLX4281) +#define __read_32bit_c0_register(source, sel) \ +({ int __res; \ + if (sel == 0) \ + __asm__ __volatile__( \ + "mfc0\t%0, " #source "\n\t" \ + : "=r" (__res)); \ + else \ + __asm__ __volatile__( \ + ".set\tmips32\n\t" \ + "mfc0\t%0, " #source ", " #sel "\n\t" \ + ".set\tmips0\n\t" \ + : "=r" (__res)); \ + __res; \ +}) +#else +#define __read_32bit_c0_register(source, sel) \ +({ int __res; \ + __asm__ __volatile__( \ + "mfc0\t%0, " #source "\n\t" \ + : "=r" (__res)); \ + __res; \ +}) +#endif + +#if defined(CONFIG_CPU_RLX5281) || defined(CONFIG_CPU_RLX4281) +#define __write_32bit_c0_register(register, sel, value) \ +do { \ + if (sel == 0) \ + __asm__ __volatile__( \ + "mtc0\t%z0, " #register "\n\t" \ + : : "Jr" ((unsigned int)(value))); \ + else \ + __asm__ __volatile__( \ + ".set\tmips32\n\t" \ + "mtc0\t%z0, " #register ", " #sel "\n\t" \ + ".set\tmips0" \ + : : "Jr" ((unsigned int)(value))); \ +} while (0) +#else +#define __write_32bit_c0_register(register, sel, value) \ +do { \ + __asm__ __volatile__( \ + 
"mtc0\t%z0, " #register "\n\t" \ + : : "Jr" ((unsigned int)(value))); \ +} while (0) +#endif + +#define __read_ulong_c0_register(reg, sel) \ + (unsigned long) __read_32bit_c0_register(reg, sel) + +#define __write_ulong_c0_register(reg, sel, val) \ +do { \ + __write_32bit_c0_register(reg, sel, val); \ +} while (0) + +#define read_c0_index() __read_32bit_c0_register($0, 0) +#define write_c0_index(val) __write_32bit_c0_register($0, 0, val) + +#define read_c0_entrylo0() __read_ulong_c0_register($2, 0) +#define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val) + +#define read_c0_entrylo1() __read_ulong_c0_register($3, 0) +#define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val) + +#define read_c0_conf() __read_32bit_c0_register($3, 0) +#define write_c0_conf(val) __write_32bit_c0_register($3, 0, val) + +#define read_c0_context() __read_ulong_c0_register($4, 0) +#define write_c0_context(val) __write_ulong_c0_register($4, 0, val) + +#define read_c0_pagemask() __read_32bit_c0_register($5, 0) +#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val) + +#define read_c0_wired() __read_32bit_c0_register($6, 0) +#define write_c0_wired(val) __write_32bit_c0_register($6, 0, val) + +#define read_c0_info() __read_32bit_c0_register($7, 0) + +#define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */ +#define write_c0_cache(val) __write_32bit_c0_register($7, 0, val) + +#define read_c0_badvaddr() __read_ulong_c0_register($8, 0) +#define write_c0_badvaddr(val) __write_ulong_c0_register($8, 0, val) + +#define read_c0_count() __read_32bit_c0_register($9, 0) +#define write_c0_count(val) __write_32bit_c0_register($9, 0, val) + +#define read_c0_count2() __read_32bit_c0_register($9, 6) /* pnx8550 */ +#define write_c0_count2(val) __write_32bit_c0_register($9, 6, val) + +#define read_c0_count3() __read_32bit_c0_register($9, 7) /* pnx8550 */ +#define write_c0_count3(val) __write_32bit_c0_register($9, 7, val) + +#define read_c0_entryhi() __read_ulong_c0_register($10, 0) +#define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val) + +#define read_c0_compare() __read_32bit_c0_register($11, 0) +#define write_c0_compare(val) __write_32bit_c0_register($11, 0, val) + +#define read_c0_compare2() __read_32bit_c0_register($11, 6) /* pnx8550 */ +#define write_c0_compare2(val) __write_32bit_c0_register($11, 6, val) + +#define read_c0_compare3() __read_32bit_c0_register($11, 7) /* pnx8550 */ +#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) + +#define read_c0_status() __read_32bit_c0_register($12, 0) + +#define write_c0_status(val) __write_32bit_c0_register($12, 0, val) + +#define read_c0_cause() __read_32bit_c0_register($13, 0) +#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) + +#define read_c0_epc() __read_ulong_c0_register($14, 0) +#define write_c0_epc(val) __write_ulong_c0_register($14, 0, val) + +#define read_c0_prid() __read_32bit_c0_register($15, 0) + +#define read_c0_xcontext() __read_ulong_c0_register($20, 0) +#define write_c0_xcontext(val) __write_ulong_c0_register($20, 0, val) + +#define read_c0_intcontrol() __read_32bit_c0_ctrl_register($20) +#define write_c0_intcontrol(val) __write_32bit_c0_ctrl_register($20, val) + +#define read_c0_framemask() __read_32bit_c0_register($21, 0) +#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val) + +#define mfhi0() \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # mfhi %0, $ac0 \n" \ + " .word 0x00000810 \n" \ + " 
move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg)); \ + __treg; \ +}) + +#define mfhi1() \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # mfhi %0, $ac1 \n" \ + " .word 0x00200810 \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg)); \ + __treg; \ +}) + +#define mfhi2() \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # mfhi %0, $ac2 \n" \ + " .word 0x00400810 \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg)); \ + __treg; \ +}) + +#define mfhi3() \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # mfhi %0, $ac3 \n" \ + " .word 0x00600810 \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg)); \ + __treg; \ +}) + +#define mflo0() \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # mflo %0, $ac0 \n" \ + " .word 0x00000812 \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg)); \ + __treg; \ +}) + +#define mflo1() \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # mflo %0, $ac1 \n" \ + " .word 0x00200812 \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg)); \ + __treg; \ +}) + +#define mflo2() \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # mflo %0, $ac2 \n" \ + " .word 0x00400812 \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg)); \ + __treg; \ +}) + +#define mflo3() \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # mflo %0, $ac3 \n" \ + " .word 0x00600812 \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg)); \ + __treg; \ +}) + +#define mthi0(x) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # mthi $1, $ac0 \n" \ + " .word 0x00200011 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +} while (0) + +#define mthi1(x) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # mthi $1, $ac1 \n" \ + " .word 0x00200811 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +} while (0) + +#define mthi2(x) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # mthi $1, $ac2 \n" \ + " .word 0x00201011 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +} while (0) + +#define mthi3(x) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # mthi $1, $ac3 \n" \ + " .word 0x00201811 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +} while (0) + +#define mtlo0(x) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # mtlo $1, $ac0 \n" \ + " .word 0x00200013 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +} while (0) + +#define mtlo1(x) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # mtlo $1, $ac1 \n" \ + " .word 0x00200813 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +} while (0) + +#define mtlo2(x) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # mtlo $1, $ac2 \n" \ + " .word 0x00201013 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +} while (0) + +#define mtlo3(x) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + 
" # mtlo $1, $ac3 \n" \ + " .word 0x00201813 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +} while (0) + +/* + * TLB operations. + * + * It is responsibility of the caller to take care of any TLB hazards. + */ +static inline void tlb_probe(void) +{ + __asm__ __volatile__( + ".set noreorder\n\t" + "tlbp\n\t" + ".set reorder"); +} + +static inline void tlb_read(void) +{ + __asm__ __volatile__( + ".set noreorder\n\t" + "tlbr\n\t" + ".set reorder"); +} + +static inline void tlb_write_indexed(void) +{ + __asm__ __volatile__( + ".set noreorder\n\t" + "tlbwi\n\t" + ".set reorder"); +} + +static inline void tlb_write_random(void) +{ + __asm__ __volatile__( + ".set noreorder\n\t" + "tlbwr\n\t" + ".set reorder"); +} + +/* + * Manipulate bits in a c0 register. + */ +#define __BUILD_SET_C0(name) \ +static inline unsigned int \ +set_c0_##name(unsigned int set) \ +{ \ + unsigned int res; \ + \ + res = read_c0_##name(); \ + res |= set; \ + write_c0_##name(res); \ + \ + return res; \ +} \ + \ +static inline unsigned int \ +clear_c0_##name(unsigned int clear) \ +{ \ + unsigned int res; \ + \ + res = read_c0_##name(); \ + res &= ~clear; \ + write_c0_##name(res); \ + \ + return res; \ +} \ + \ +static inline unsigned int \ +change_c0_##name(unsigned int change, unsigned int new) \ +{ \ + unsigned int res; \ + \ + res = read_c0_##name(); \ + res &= ~change; \ + res |= (new & change); \ + write_c0_##name(res); \ + \ + return res; \ +} + +__BUILD_SET_C0(status) +__BUILD_SET_C0(cause) + +#endif /* !__ASSEMBLY__ */ + +/* + * RLX CP0 register names + */ +#define LXCP0_ESTATUS $0 +#define LXCP0_ECAUSE $1 +#define LXCP0_INTVEC $2 + +#define LXCP0_CCTL $20 +#define CCTL_DInval 0x00000001 +#define CCTL_IInval 0x00000002 +#define CCTL_IMEM0FILL 0x00000010 +#define CCTL_IMEM0OFF 0x00000020 +#define CCTL_IMEM0ON 0x00000040 +#define CCTL_DWB 0x00000100 +#define CCTL_DWBInval 0x00000200 +#define CCTL_DMEM0ON 0x00000400 +#define CCTL_DMEM0OFF 0x00000800 + +/* + * RLX status register bits + */ +#define EST0_IM 0x00ff0000 +#define ESTATUSF_IP0 (_ULCAST_(1) << 16) +#define ESTATUSF_IP1 (_ULCAST_(1) << 17) +#define ESTATUSF_IP2 (_ULCAST_(1) << 18) +#define ESTATUSF_IP3 (_ULCAST_(1) << 19) +#define ESTATUSF_IP4 (_ULCAST_(1) << 20) +#define ESTATUSF_IP5 (_ULCAST_(1) << 21) +#define ESTATUSF_IP6 (_ULCAST_(1) << 22) +#define ESTATUSF_IP7 (_ULCAST_(1) << 23) + +#define ECAUSEF_IP (_ULCAST_(255) << 16) +#define ECAUSEF_IP0 (_ULCAST_(1) << 16) +#define ECAUSEF_IP1 (_ULCAST_(1) << 17) +#define ECAUSEF_IP2 (_ULCAST_(1) << 18) +#define ECAUSEF_IP3 (_ULCAST_(1) << 19) +#define ECAUSEF_IP4 (_ULCAST_(1) << 20) +#define ECAUSEF_IP5 (_ULCAST_(1) << 21) +#define ECAUSEF_IP6 (_ULCAST_(1) << 22) +#define ECAUSEF_IP7 (_ULCAST_(1) << 23) + +#ifndef __ASSEMBLY__ + +/* + * Macros to access the system control coprocessor + */ + +#define __read_32bit_lxc0_register(source) \ +({ int __res; \ + __asm__ __volatile__( \ + "mflxc0\t%0, " #source "\n\t" \ + : "=r" (__res)); \ + __res; \ +}) + +#define __write_32bit_lxc0_register(register, value) \ +do { \ + __asm__ __volatile__( \ + "mtlxc0\t%z0, " #register "\n\t" \ + : : "Jr" ((unsigned int)(value))); \ +} while (0) + + +#define read_lxc0_estatus() __read_32bit_lxc0_register($0) +#define read_lxc0_ecause() __read_32bit_lxc0_register($1) +#define read_lxc0_intvec() __read_32bit_lxc0_register($2) +#define write_lxc0_estatus(val) __write_32bit_lxc0_register($0, val) +#define write_lxc0_ecause(val) __write_32bit_lxc0_register($1, val) +#define write_lxc0_intvec(val) __write_32bit_lxc0_register($2, val) + 
+/* + * Manipulate bits in a lxc0 register. + */ +#define __BUILD_SET_LXC0(name) \ +static inline unsigned int \ +set_lxc0_##name(unsigned int set) \ +{ \ + unsigned int res; \ + \ + res = read_lxc0_##name(); \ + res |= set; \ + write_lxc0_##name(res); \ + \ + return res; \ +} \ + \ +static inline unsigned int \ +clear_lxc0_##name(unsigned int clear) \ +{ \ + unsigned int res; \ + \ + res = read_lxc0_##name(); \ + res &= ~clear; \ + write_lxc0_##name(res); \ + \ + return res; \ +} \ + \ +static inline unsigned int \ +change_lxc0_##name(unsigned int change, unsigned int new) \ +{ \ + unsigned int res; \ + \ + res = read_lxc0_##name(); \ + res &= ~change; \ + res |= (new & change); \ + write_lxc0_##name(res); \ + \ + return res; \ +} + +__BUILD_SET_LXC0(intvec) +__BUILD_SET_LXC0(estatus) +__BUILD_SET_LXC0(ecause) + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_RLXREGS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/scatterlist.h b/target/linux/realtek/files/arch/rlx/include/asm/scatterlist.h new file mode 100644 index 000000000..83d69fe17 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/scatterlist.h @@ -0,0 +1,28 @@ +#ifndef __ASM_SCATTERLIST_H +#define __ASM_SCATTERLIST_H + +#include <asm/types.h> + +struct scatterlist { +#ifdef CONFIG_DEBUG_SG + unsigned long sg_magic; +#endif + unsigned long page_link; + unsigned int offset; + dma_addr_t dma_address; + unsigned int length; +}; + +/* + * These macros should be used after a pci_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries pci_map_sg + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. + */ +#define sg_dma_address(sg) ((sg)->dma_address) +#define sg_dma_len(sg) ((sg)->length) + +#define ISA_DMA_THRESHOLD (0x00ffffffUL) + +#endif /* __ASM_SCATTERLIST_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/seccomp.h b/target/linux/realtek/files/arch/rlx/include/asm/seccomp.h new file mode 100644 index 000000000..ae6306ebd --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/seccomp.h @@ -0,0 +1,32 @@ +#ifndef __ASM_SECCOMP_H + +#include <linux/unistd.h> + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#define __NR_seccomp_sigreturn __NR_rt_sigreturn + +/* + * Kludge alert: + * + * The generic seccomp code currently allows only a single compat ABI. Until + * this is fixed we priorize O32 as the compat ABI over N32. 
+ */ +#ifdef CONFIG_MIPS32_O32 + +#define __NR_seccomp_read_32 4003 +#define __NR_seccomp_write_32 4004 +#define __NR_seccomp_exit_32 4001 +#define __NR_seccomp_sigreturn_32 4193 /* rt_sigreturn */ + +#elif defined(CONFIG_MIPS32_N32) + +#define __NR_seccomp_read_32 6000 +#define __NR_seccomp_write_32 6001 +#define __NR_seccomp_exit_32 6058 +#define __NR_seccomp_sigreturn_32 6211 /* rt_sigreturn */ + +#endif /* CONFIG_MIPS32_O32 */ + +#endif /* __ASM_SECCOMP_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/sections.h b/target/linux/realtek/files/arch/rlx/include/asm/sections.h new file mode 100644 index 000000000..b7e37262c --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/sections.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SECTIONS_H +#define _ASM_SECTIONS_H + +#include <asm-generic/sections.h> + +#endif /* _ASM_SECTIONS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/segment.h b/target/linux/realtek/files/arch/rlx/include/asm/segment.h new file mode 100644 index 000000000..92ac001fc --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/segment.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SEGMENT_H +#define _ASM_SEGMENT_H + +/* Only here because we have some old header files that expect it.. */ + +#endif /* _ASM_SEGMENT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/sembuf.h b/target/linux/realtek/files/arch/rlx/include/asm/sembuf.h new file mode 100644 index 000000000..7281a4dec --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/sembuf.h @@ -0,0 +1,22 @@ +#ifndef _ASM_SEMBUF_H +#define _ASM_SEMBUF_H + +/* + * The semid64_ds structure for the MIPS architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + __kernel_time_t sem_ctime; /* last change time */ + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _ASM_SEMBUF_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/serial.h b/target/linux/realtek/files/arch/rlx/include/asm/serial.h new file mode 100644 index 000000000..c07ebd8eb --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/serial.h @@ -0,0 +1,22 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_SERIAL_H +#define _ASM_SERIAL_H + + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + * + * It'd be nice if someone built a serial card with a 24.576 MHz + * clock, since the 16550A is capable of handling a top speed of 1.5 + * megabits/second; but this requires the faster clock. 
+ */ +#define BASE_BAUD (1843200 / 16) + +#endif /* _ASM_SERIAL_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/setup.h b/target/linux/realtek/files/arch/rlx/include/asm/setup.h new file mode 100644 index 000000000..e600cedda --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/setup.h @@ -0,0 +1,10 @@ +#ifndef _MIPS_SETUP_H +#define _MIPS_SETUP_H + +#define COMMAND_LINE_SIZE 256 + +#ifdef __KERNEL__ +extern void setup_early_printk(void); +#endif /* __KERNEL__ */ + +#endif /* __SETUP_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/sgidefs.h b/target/linux/realtek/files/arch/rlx/include/asm/sgidefs.h new file mode 100644 index 000000000..876442fcf --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/sgidefs.h @@ -0,0 +1,44 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1999, 2001 Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#ifndef __ASM_SGIDEFS_H +#define __ASM_SGIDEFS_H + +/* + * Using a Linux compiler for building Linux seems logic but not to + * everybody. + */ +#ifndef __linux__ +#error Use a Linux compiler or give up. +#endif + +/* + * Definitions for the ISA levels + * + * With the introduction of MIPS32 / MIPS64 instruction sets definitions + * MIPS ISAs are no longer subsets of each other. Therefore comparisons + * on these symbols except with == may result in unexpected results and + * are forbidden! + */ +#define _MIPS_ISA_MIPS1 1 +#define _MIPS_ISA_MIPS2 2 +#define _MIPS_ISA_MIPS3 3 +#define _MIPS_ISA_MIPS4 4 +#define _MIPS_ISA_MIPS5 5 +#define _MIPS_ISA_MIPS32 6 +#define _MIPS_ISA_MIPS64 7 + +/* + * Subprogram calling convention + */ +#define _MIPS_SIM_ABI32 1 +#define _MIPS_SIM_NABI32 2 +#define _MIPS_SIM_ABI64 3 + +#endif /* __ASM_SGIDEFS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/shmbuf.h b/target/linux/realtek/files/arch/rlx/include/asm/shmbuf.h new file mode 100644 index 000000000..f99443827 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/shmbuf.h @@ -0,0 +1,38 @@ +#ifndef _ASM_SHMBUF_H +#define _ASM_SHMBUF_H + +/* + * The shmid64_ds structure for the MIPS architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 32-bit rsp. 64-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. 
of current attaches */ + unsigned long __unused1; + unsigned long __unused2; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _ASM_SHMBUF_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/shmparam.h b/target/linux/realtek/files/arch/rlx/include/asm/shmparam.h new file mode 100644 index 000000000..092907207 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/shmparam.h @@ -0,0 +1,13 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_SHMPARAM_H +#define _ASM_SHMPARAM_H + +#define __ARCH_FORCE_SHMLBA 1 + +#define SHMLBA 0x40000 /* attach addr a multiple of this */ + +#endif /* _ASM_SHMPARAM_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/sigcontext.h b/target/linux/realtek/files/arch/rlx/include/asm/sigcontext.h new file mode 100644 index 000000000..728d7ca10 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/sigcontext.h @@ -0,0 +1,28 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1997, 1999 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_SIGCONTEXT_H +#define _ASM_SIGCONTEXT_H + +#include <linux/types.h> +#include <asm/sgidefs.h> + +/* + * Keep this struct definition in sync with the sigcontext fragment + * in arch/mips/tools/offset.c + */ +struct sigcontext { + unsigned int sc_regmask; /* Unused */ + unsigned int sc_status; /* Unused */ + unsigned long long sc_pc; + unsigned long long sc_regs[32]; + unsigned long long sc_mdhi; + unsigned long long sc_mdlo; +}; + +#endif /* _ASM_SIGCONTEXT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/siginfo.h b/target/linux/realtek/files/arch/rlx/include/asm/siginfo.h new file mode 100644 index 000000000..e80422213 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/siginfo.h @@ -0,0 +1,127 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle + * Copyright (C) 2000, 2001 Silicon Graphics, Inc. + */ +#ifndef _ASM_SIGINFO_H +#define _ASM_SIGINFO_H + + +#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int)) +#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */ + +#define HAVE_ARCH_SIGINFO_T + +/* + * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked + * by design ... + */ +#define HAVE_ARCH_COPY_SIGINFO +struct siginfo; + +/* + * Careful to keep union _sifields from shifting ... 
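+ * The preamble here is si_signo, si_code and si_errno, i.e. 3 ints =
+ * 12 bytes.  Assuming the generic SI_MAX_SIZE of 128 from
+ * <asm-generic/siginfo.h>, SI_PAD_SIZE works out to (128 - 12) / 4 = 29
+ * ints, and the __pad0 member of siginfo_t below to 128/4 - 29 - 3 = 0
+ * entries for this 32-bit layout; it is only there to keep the union
+ * placed identically when those sizes differ.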
+ */ +#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) + +#include <asm-generic/siginfo.h> + +typedef struct siginfo { + int si_signo; + int si_code; + int si_errno; + int __pad0[SI_MAX_SIZE / sizeof(int) - SI_PAD_SIZE - 3]; + + union { + int _pad[SI_PAD_SIZE]; + + /* kill() */ + struct { + pid_t _pid; /* sender's pid */ + __ARCH_SI_UID_T _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; + sigval_t _sigval; /* same as below */ + int _sys_private; /* not to be passed to user */ + } _timer; + + /* POSIX.1b signals */ + struct { + pid_t _pid; /* sender's pid */ + __ARCH_SI_UID_T _uid; /* sender's uid */ + sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + pid_t _pid; /* which child */ + __ARCH_SI_UID_T _uid; /* sender's uid */ + int _status; /* exit code */ + clock_t _utime; + clock_t _stime; + } _sigchld; + +#if 0 + /* IRIX SIGCHLD */ + struct { + pid_t _pid; /* which child */ + clock_t _utime; + int _status; /* exit code */ + clock_t _stime; + } _irix_sigchld; +#endif + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ + struct { + void __user *_addr; /* faulting insn/memory ref. */ +#ifdef __ARCH_SI_TRAPNO + int _trapno; /* TRAP # which caused the signal */ +#endif + } _sigfault; + + /* SIGPOLL, SIGXFSZ (To do ...) */ + struct { + __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + } _sifields; +} siginfo_t; + +/* + * si_code values + * Again these have been choosen to be IRIX compatible. + */ +#undef SI_ASYNCIO +#undef SI_TIMER +#undef SI_MESGQ +#define SI_ASYNCIO -2 /* sent by AIO completion */ +#define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ +#define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ + +#ifdef __KERNEL__ + +/* + * Duplicated here because of <asm-generic/siginfo.h> braindamage ... + */ +#include <linux/string.h> + +static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(*to)); + else + /* _sigchld is currently the largest know union member */ + memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); +} + +#endif + +#endif /* _ASM_SIGINFO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/signal.h b/target/linux/realtek/files/arch/rlx/include/asm/signal.h new file mode 100644 index 000000000..c783f3649 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/signal.h @@ -0,0 +1,139 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 97, 98, 99, 2003 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_SIGNAL_H +#define _ASM_SIGNAL_H + +#include <linux/types.h> + +#define _NSIG 128 +#define _NSIG_BPW (sizeof(unsigned long) * 8) +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +#define SIGHUP 1 /* Hangup (POSIX). */ +#define SIGINT 2 /* Interrupt (ANSI). */ +#define SIGQUIT 3 /* Quit (POSIX). */ +#define SIGILL 4 /* Illegal instruction (ANSI). */ +#define SIGTRAP 5 /* Trace trap (POSIX). */ +#define SIGIOT 6 /* IOT trap (4.2 BSD). */ +#define SIGABRT SIGIOT /* Abort (ANSI). 
*/ +#define SIGEMT 7 +#define SIGFPE 8 /* Floating-point exception (ANSI). */ +#define SIGKILL 9 /* Kill, unblockable (POSIX). */ +#define SIGBUS 10 /* BUS error (4.2 BSD). */ +#define SIGSEGV 11 /* Segmentation violation (ANSI). */ +#define SIGSYS 12 +#define SIGPIPE 13 /* Broken pipe (POSIX). */ +#define SIGALRM 14 /* Alarm clock (POSIX). */ +#define SIGTERM 15 /* Termination (ANSI). */ +#define SIGUSR1 16 /* User-defined signal 1 (POSIX). */ +#define SIGUSR2 17 /* User-defined signal 2 (POSIX). */ +#define SIGCHLD 18 /* Child status has changed (POSIX). */ +#define SIGCLD SIGCHLD /* Same as SIGCHLD (System V). */ +#define SIGPWR 19 /* Power failure restart (System V). */ +#define SIGWINCH 20 /* Window size change (4.3 BSD, Sun). */ +#define SIGURG 21 /* Urgent condition on socket (4.2 BSD). */ +#define SIGIO 22 /* I/O now possible (4.2 BSD). */ +#define SIGPOLL SIGIO /* Pollable event occurred (System V). */ +#define SIGSTOP 23 /* Stop, unblockable (POSIX). */ +#define SIGTSTP 24 /* Keyboard stop (POSIX). */ +#define SIGCONT 25 /* Continue (POSIX). */ +#define SIGTTIN 26 /* Background read from tty (POSIX). */ +#define SIGTTOU 27 /* Background write to tty (POSIX). */ +#define SIGVTALRM 28 /* Virtual alarm clock (4.2 BSD). */ +#define SIGPROF 29 /* Profiling alarm clock (4.2 BSD). */ +#define SIGXCPU 30 /* CPU limit exceeded (4.2 BSD). */ +#define SIGXFSZ 31 /* File size limit exceeded (4.2 BSD). */ + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. 
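+ *
+ * Illustrative userspace usage through the C library (handle_fault is a
+ * hypothetical SA_SIGINFO-style handler):
+ *
+ *	struct sigaction sa = { 0 };
+ *	sa.sa_sigaction = handle_fault;
+ *	sa.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
+ *	sigemptyset(&sa.sa_mask);
+ *	sigaction(SIGSEGV, &sa, NULL);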
+ */ +#define SA_ONSTACK 0x08000000 +#define SA_RESETHAND 0x80000000 +#define SA_RESTART 0x10000000 +#define SA_SIGINFO 0x00000008 +#define SA_NODEFER 0x40000000 +#define SA_NOCLDWAIT 0x00010000 +#define SA_NOCLDSTOP 0x00000001 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND + +#define SA_RESTORER 0x04000000 /* Only for o32 */ + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +#define MINSIGSTKSZ 2048 +#define SIGSTKSZ 8192 + +#ifdef __KERNEL__ + +#ifdef CONFIG_TRAD_SIGNALS +#define sig_uses_siginfo(ka) ((ka)->sa.sa_flags & SA_SIGINFO) +#else +#define sig_uses_siginfo(ka) (1) +#endif + +#endif /* __KERNEL__ */ + +#define SIG_BLOCK 1 /* for blocking signals */ +#define SIG_UNBLOCK 2 /* for unblocking signals */ +#define SIG_SETMASK 3 /* for setting the signal mask */ + +#include <asm-generic/signal-defs.h> + +struct sigaction { + unsigned int sa_flags; + __sighandler_t sa_handler; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +/* IRIX compatible stack_t */ +typedef struct sigaltstack { + void __user *ss_sp; + size_t ss_size; + int ss_flags; +} stack_t; + +#ifdef __KERNEL__ +#include <asm/sigcontext.h> +#include <asm/siginfo.h> + +#define ptrace_signal_deliver(regs, cookie) do { } while (0) + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SIGNAL_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/sim.h b/target/linux/realtek/files/arch/rlx/include/asm/sim.h new file mode 100644 index 000000000..6d110e966 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/sim.h @@ -0,0 +1,42 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999, 2000, 2003 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_SIM_H +#define _ASM_SIM_H + + +#include <asm/asm-offsets.h> + +#define __str2(x) #x +#define __str(x) __str2(x) + +#define save_static_function(symbol) \ +__asm__( \ + ".text\n\t" \ + ".globl\t" #symbol "\n\t" \ + ".align\t2\n\t" \ + ".type\t" #symbol ", @function\n\t" \ + ".ent\t" #symbol ", 0\n" \ + #symbol":\n\t" \ + ".frame\t$29, 0, $31\n\t" \ + "sw\t$16,"__str(PT_R16)"($29)\t\t\t# save_static_function\n\t" \ + "sw\t$17,"__str(PT_R17)"($29)\n\t" \ + "sw\t$18,"__str(PT_R18)"($29)\n\t" \ + "sw\t$19,"__str(PT_R19)"($29)\n\t" \ + "sw\t$20,"__str(PT_R20)"($29)\n\t" \ + "sw\t$21,"__str(PT_R21)"($29)\n\t" \ + "sw\t$22,"__str(PT_R22)"($29)\n\t" \ + "sw\t$23,"__str(PT_R23)"($29)\n\t" \ + "sw\t$30,"__str(PT_R30)"($29)\n\t" \ + "j\t_" #symbol "\n\t" \ + ".end\t" #symbol "\n\t" \ + ".size\t" #symbol",. - " #symbol) + +#define nabi_no_regargs + +#endif /* _ASM_SIM_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/smp-ops.h b/target/linux/realtek/files/arch/rlx/include/asm/smp-ops.h new file mode 100644 index 000000000..04d59dccd --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/smp-ops.h @@ -0,0 +1,24 @@ +/* + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com) + * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc. 
+ * Copyright (C) 2000, 2001, 2002 Ralf Baechle + * Copyright (C) 2000, 2001 Broadcom Corporation + */ +#ifndef __ASM_SMP_OPS_H +#define __ASM_SMP_OPS_H + +struct plat_smp_ops; + +static inline void register_smp_ops(struct plat_smp_ops *ops) +{ +} + +extern struct plat_smp_ops up_smp_ops; +extern struct plat_smp_ops cmp_smp_ops; +extern struct plat_smp_ops vsmp_smp_ops; + +#endif /* __ASM_SMP_OPS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/smp.h b/target/linux/realtek/files/arch/rlx/include/asm/smp.h new file mode 100644 index 000000000..2f83fa863 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/smp.h @@ -0,0 +1,64 @@ +/* + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com) + * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc. + * Copyright (C) 2000, 2001, 2002 Ralf Baechle + * Copyright (C) 2000, 2001 Broadcom Corporation + */ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +#include <linux/bitops.h> +#include <linux/linkage.h> +#include <linux/smp.h> +#include <linux/threads.h> +#include <linux/cpumask.h> + +#include <asm/atomic.h> +#include <asm/smp-ops.h> + +extern int smp_num_siblings; +extern cpumask_t cpu_sibling_map[]; + +#define raw_smp_processor_id() (current_thread_info()->cpu) + +/* Map from cpu id to sequential logical cpu number. This will only + not be idempotent when cpus failed to come on-line. */ +extern int __cpu_number_map[NR_CPUS]; +#define cpu_number_map(cpu) __cpu_number_map[cpu] + +/* The reverse map from sequential logical cpu number to cpu id. */ +extern int __cpu_logical_map[NR_CPUS]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] + +#define NO_PROC_ID (-1) + +#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ +#define SMP_CALL_FUNCTION 0x2 +/* Octeon - Tell another core to flush its icache */ +#define SMP_ICACHE_FLUSH 0x4 + + +extern void asmlinkage smp_bootstrap(void); + +/* + * this function sends a 'reschedule' IPI to another CPU. + * it goes straight through and wastes no time serializing + * anything. Worst case is that we lose a reschedule ... + */ +static inline void smp_send_reschedule(int cpu) +{ + extern struct plat_smp_ops *mp_ops; /* private */ + + mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); +} + +extern asmlinkage void smp_call_function_interrupt(void); + +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + +#endif /* __ASM_SMP_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/socket.h b/target/linux/realtek/files/arch/rlx/include/asm/socket.h new file mode 100644 index 000000000..2abca1780 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/socket.h @@ -0,0 +1,120 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1997, 1999, 2000, 2001 Ralf Baechle + * Copyright (C) 2000, 2001 Silicon Graphics, Inc. + */ +#ifndef _ASM_SOCKET_H +#define _ASM_SOCKET_H + +#include <asm/sockios.h> + +/* + * For setsockopt(2) + * + * This defines are ABI conformant as far as Linux supports these ... + */ +#define SOL_SOCKET 0xffff + +#define SO_DEBUG 0x0001 /* Record debugging information. */ +#define SO_REUSEADDR 0x0004 /* Allow reuse of local addresses. 
*/ +#define SO_KEEPALIVE 0x0008 /* Keep connections alive and send + SIGPIPE when they die. */ +#define SO_DONTROUTE 0x0010 /* Don't do local routing. */ +#define SO_BROADCAST 0x0020 /* Allow transmission of + broadcast messages. */ +#define SO_LINGER 0x0080 /* Block on close of a reliable + socket to transmit pending data. */ +#define SO_OOBINLINE 0x0100 /* Receive out-of-band data in-band. */ +#if 0 +To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */ +#endif + +#define SO_TYPE 0x1008 /* Compatible name for SO_STYLE. */ +#define SO_STYLE SO_TYPE /* Synonym */ +#define SO_ERROR 0x1007 /* get error status and clear */ +#define SO_SNDBUF 0x1001 /* Send buffer size. */ +#define SO_RCVBUF 0x1002 /* Receive buffer. */ +#define SO_SNDLOWAT 0x1003 /* send low-water mark */ +#define SO_RCVLOWAT 0x1004 /* receive low-water mark */ +#define SO_SNDTIMEO 0x1005 /* send timeout */ +#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_ACCEPTCONN 0x1009 + +/* linux-specific, might as well be the same as on i386 */ +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_BSDCOMPAT 14 + +#define SO_PASSCRED 17 +#define SO_PEERCRED 18 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 22 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 +#define SO_SECURITY_ENCRYPTION_NETWORK 24 + +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 + +#define SO_PEERNAME 28 +#define SO_TIMESTAMP 29 +#define SCM_TIMESTAMP SO_TIMESTAMP + +#define SO_PEERSEC 30 +#define SO_SNDBUFFORCE 31 +#define SO_RCVBUFFORCE 33 +#define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS + +#define SO_MARK 36 + +#define SO_TIMESTAMPING 37 +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#ifdef __KERNEL__ + +/** sock_type - Socket types + * + * Please notice that for binary compat reasons MIPS has to + * override the enum sock_type in include/linux/net.h, so + * we define ARCH_HAS_SOCKET_TYPES here. + * + * @SOCK_DGRAM - datagram (conn.less) socket + * @SOCK_STREAM - stream (connection) socket + * @SOCK_RAW - raw socket + * @SOCK_RDM - reliably-delivered message + * @SOCK_SEQPACKET - sequential packet socket + * @SOCK_PACKET - linux specific way of getting packets at the dev level. + * For writing rarp and other similar things on the user level. + */ +enum sock_type { + SOCK_DGRAM = 1, + SOCK_STREAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +#define SOCK_MAX (SOCK_PACKET + 1) +/* Mask which covers at least up to SOCK_MASK-1. The + * * remaining bits are used as flags. */ +#define SOCK_TYPE_MASK 0xf + +/* Flags for socket, socketpair, paccept */ +#define SOCK_CLOEXEC O_CLOEXEC +#define SOCK_NONBLOCK O_NONBLOCK + +#define ARCH_HAS_SOCKET_TYPES 1 + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SOCKET_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/sockios.h b/target/linux/realtek/files/arch/rlx/include/asm/sockios.h new file mode 100644 index 000000000..ed1a5f78d --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/sockios.h @@ -0,0 +1,26 @@ +/* + * Socket-level I/O control calls. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1995 by Ralf Baechle + */ +#ifndef _ASM_SOCKIOS_H +#define _ASM_SOCKIOS_H + +#include <asm/ioctl.h> + +/* Socket-level I/O control calls. */ +#define FIOGETOWN _IOR('f', 123, int) +#define FIOSETOWN _IOW('f', 124, int) + +#define SIOCATMARK _IOR('s', 7, int) +#define SIOCSPGRP _IOW('s', 8, pid_t) +#define SIOCGPGRP _IOR('s', 9, pid_t) + +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ + +#endif /* _ASM_SOCKIOS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/sparsemem.h b/target/linux/realtek/files/arch/rlx/include/asm/sparsemem.h new file mode 100644 index 000000000..795ac6c23 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/sparsemem.h @@ -0,0 +1,14 @@ +#ifndef _MIPS_SPARSEMEM_H +#define _MIPS_SPARSEMEM_H +#ifdef CONFIG_SPARSEMEM + +/* + * SECTION_SIZE_BITS 2^N: how big each section will be + * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space + */ +#define SECTION_SIZE_BITS 28 +#define MAX_PHYSMEM_BITS 35 + +#endif /* CONFIG_SPARSEMEM */ +#endif /* _MIPS_SPARSEMEM_H */ + diff --git a/target/linux/realtek/files/arch/rlx/include/asm/spinlock.h b/target/linux/realtek/files/arch/rlx/include/asm/spinlock.h new file mode 100644 index 000000000..fc33c7878 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/spinlock.h @@ -0,0 +1,336 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_SPINLOCK_H +#define _ASM_SPINLOCK_H + +/* tonywu: NOT USED in single CPU platform */ + +#include <linux/compiler.h> +#include <asm/barrier.h> + +/* + * Your basic SMP spinlocks, allowing only a single CPU anywhere + * + * Simple spin lock operations. There are two variants, one clears IRQ's + * on the local processor, one does not. + * + * These are fair FIFO ticket locks + * + * (the type definitions are in asm/spinlock_types.h) + */ + + +/* + * Ticket locks are conceptually two parts, one indicating the current head of + * the queue, and the other indicating the current tail. The lock is acquired + * by atomically noting the tail and incrementing it by one (thus adding + * ourself to the queue and noting our position), then waiting until the head + * becomes equal to the the initial value of the tail. 
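+ *
+ * A conceptual sketch of what the ll/sc sequences below implement,
+ * ignoring the atomicity details:
+ *
+ *	my_ticket = lock->tail++;		take the next ticket
+ *	while (lock->head != my_ticket)
+ *		cpu_relax();			wait to be served
+ *	... critical section ...
+ *	lock->head++;				unlock: serve next waiter
+ *
+ * Both counters live in the single word described in
+ * asm/spinlock_types.h, serving_now in the low bits and the ticket
+ * counter above it.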
+ */ + +static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +{ + unsigned int counters = ACCESS_ONCE(lock->lock); + + return ((counters >> 14) ^ counters) & 0x1fff; +} + +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define __raw_spin_unlock_wait(x) \ + while (__raw_spin_is_locked(x)) { cpu_relax(); } + +static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +{ + unsigned int counters = ACCESS_ONCE(lock->lock); + + return (((counters >> 14) - counters) & 0x1fff) > 1; +} +#define __raw_spin_is_contended __raw_spin_is_contended + +static inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + int my_ticket; + int tmp; + + __asm__ __volatile__ ( + " .set push # __raw_spin_lock \n" + " .set noreorder \n" + " \n" + " ll %[ticket], %[ticket_ptr] \n" + "1: addiu %[my_ticket], %[ticket], 0x4000 \n" + " sc %[my_ticket], %[ticket_ptr] \n" + " beqz %[my_ticket], 3f \n" + " nop \n" + " srl %[my_ticket], %[ticket], 14 \n" + " andi %[my_ticket], %[my_ticket], 0x1fff \n" + " andi %[ticket], %[ticket], 0x1fff \n" + " bne %[ticket], %[my_ticket], 4f \n" + " subu %[ticket], %[my_ticket], %[ticket] \n" + "2: \n" + " .subsection 2 \n" + "3: b 1b \n" + " ll %[ticket], %[ticket_ptr] \n" + " \n" + "4: andi %[ticket], %[ticket], 0x1fff \n" + " sll %[ticket], 5 \n" + " \n" + "6: bnez %[ticket], 6b \n" + " subu %[ticket], 1 \n" + " \n" + " lw %[ticket], %[ticket_ptr] \n" + " andi %[ticket], %[ticket], 0x1fff \n" + " beq %[ticket], %[my_ticket], 2b \n" + " subu %[ticket], %[my_ticket], %[ticket] \n" + " b 4b \n" + " subu %[ticket], %[ticket], 1 \n" + " .previous \n" + " .set pop \n" + : [ticket_ptr] "+m" (lock->lock), + [ticket] "=&r" (tmp), + [my_ticket] "=&r" (my_ticket)); + + smp_llsc_mb(); +} + +static inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + int tmp; + + smp_llsc_mb(); + + __asm__ __volatile__ ( + " .set push # __raw_spin_unlock \n" + " .set noreorder \n" + " \n" + " ll %[ticket], %[ticket_ptr] \n" + "1: addiu %[ticket], %[ticket], 1 \n" + " ori %[ticket], %[ticket], 0x2000 \n" + " xori %[ticket], %[ticket], 0x2000 \n" + " sc %[ticket], %[ticket_ptr] \n" + " beqz %[ticket], 2f \n" + " nop \n" + " \n" + " .subsection 2 \n" + "2: b 1b \n" + " ll %[ticket], %[ticket_ptr] \n" + " .previous \n" + " .set pop \n" + : [ticket_ptr] "+m" (lock->lock), + [ticket] "=&r" (tmp)); +} + +static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) +{ + int tmp, tmp2, tmp3; + + __asm__ __volatile__ ( + " .set push # __raw_spin_trylock \n" + " .set noreorder \n" + " \n" + " ll %[ticket], %[ticket_ptr] \n" + "1: srl %[my_ticket], %[ticket], 14 \n" + " andi %[my_ticket], %[my_ticket], 0x1fff \n" + " andi %[now_serving], %[ticket], 0x1fff \n" + " bne %[my_ticket], %[now_serving], 3f \n" + " addiu %[ticket], %[ticket], 0x4000 \n" + " sc %[ticket], %[ticket_ptr] \n" + " beqz %[ticket], 4f \n" + " li %[ticket], 1 \n" + "2: \n" + " .subsection 2 \n" + "3: b 2b \n" + " li %[ticket], 0 \n" + "4: b 1b \n" + " ll %[ticket], %[ticket_ptr] \n" + " .previous \n" + " .set pop \n" + : [ticket_ptr] "+m" (lock->lock), + [ticket] "=&r" (tmp), + [my_ticket] "=&r" (tmp2), + [now_serving] "=&r" (tmp3)); + + smp_llsc_mb(); + + return tmp; +} + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * + * NOTE! it is quite common to have readers in interrupts but no interrupt + * writers. For those circumstances we can "mix" irq-safe locks - any writer + * needs to get a irq-safe write-lock, but readers can get non-irqsafe + * read-locks. 
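+ *
+ * The encoding used below, roughly: rw->lock == 0 means free, a positive
+ * value is the number of active readers, and a writer parks 0x80000000
+ * in the word so it goes negative; that is why the reader paths test
+ * with bltz and the writer paths with bnez.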
+ */ + +/* + * read_can_lock - would read_trylock() succeed? + * @lock: the rwlock in question. + */ +#define __raw_read_can_lock(rw) ((rw)->lock >= 0) + +/* + * write_can_lock - would write_trylock() succeed? + * @lock: the rwlock in question. + */ +#define __raw_write_can_lock(rw) (!(rw)->lock) + +static inline void __raw_read_lock(raw_rwlock_t *rw) +{ + unsigned int tmp; + + __asm__ __volatile__( + " .set noreorder # __raw_read_lock \n" + "1: ll %1, %2 \n" + " bltz %1, 2f \n" + " addu %1, 1 \n" + " sc %1, %0 \n" + " beqz %1, 1b \n" + " nop \n" + " .subsection 2 \n" + "2: ll %1, %2 \n" + " bltz %1, 2b \n" + " addu %1, 1 \n" + " b 1b \n" + " nop \n" + " .previous \n" + " .set reorder \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); + + smp_llsc_mb(); +} + +/* Note the use of sub, not subu which will make the kernel die with an + overflow exception if we ever try to unlock an rwlock that is already + unlocked or is being held by a writer. */ +static inline void __raw_read_unlock(raw_rwlock_t *rw) +{ + unsigned int tmp; + + smp_llsc_mb(); + + __asm__ __volatile__( + " .set noreorder # __raw_read_unlock \n" + "1: ll %1, %2 \n" + " sub %1, 1 \n" + " sc %1, %0 \n" + " beqz %1, 2f \n" + " nop \n" + " .subsection 2 \n" + "2: b 1b \n" + " nop \n" + " .previous \n" + " .set reorder \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); +} + +static inline void __raw_write_lock(raw_rwlock_t *rw) +{ + unsigned int tmp; + + __asm__ __volatile__( + " .set noreorder # __raw_write_lock \n" + "1: ll %1, %2 \n" + " bnez %1, 2f \n" + " lui %1, 0x8000 \n" + " sc %1, %0 \n" + " beqz %1, 2f \n" + " nop \n" + " .subsection 2 \n" + "2: ll %1, %2 \n" + " bnez %1, 2b \n" + " lui %1, 0x8000 \n" + " b 1b \n" + " nop \n" + " .previous \n" + " .set reorder \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); + + smp_llsc_mb(); +} + +static inline void __raw_write_unlock(raw_rwlock_t *rw) +{ + smp_mb(); + + __asm__ __volatile__( + " # __raw_write_unlock \n" + " sw $0, %0 \n" + : "=m" (rw->lock) + : "m" (rw->lock) + : "memory"); +} + +static inline int __raw_read_trylock(raw_rwlock_t *rw) +{ + unsigned int tmp; + int ret; + + __asm__ __volatile__( + " .set noreorder # __raw_read_trylock \n" + " li %2, 0 \n" + "1: ll %1, %3 \n" + " bltz %1, 2f \n" + " addu %1, 1 \n" + " sc %1, %0 \n" + " beqz %1, 1b \n" + " nop \n" + " .set reorder \n" + __WEAK_LLSC_MB + " li %2, 1 \n" + "2: \n" + : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) + : "m" (rw->lock) + : "memory"); + + return ret; +} + +static inline int __raw_write_trylock(raw_rwlock_t *rw) +{ + unsigned int tmp; + int ret; + + __asm__ __volatile__( + " .set noreorder # __raw_write_trylock \n" + " li %2, 0 \n" + "1: ll %1, %3 \n" + " bnez %1, 2f \n" + " lui %1, 0x8000 \n" + " sc %1, %0 \n" + " beqz %1, 3f \n" + " li %2, 1 \n" + "2: \n" + __WEAK_LLSC_MB + " .subsection 2 \n" + "3: b 1b \n" + " li %2, 0 \n" + " .previous \n" + " .set reorder \n" + : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) + : "m" (rw->lock) + : "memory"); + + return ret; +} + +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) + +#define _raw_spin_relax(lock) cpu_relax() +#define _raw_read_relax(lock) cpu_relax() +#define _raw_write_relax(lock) cpu_relax() + +#endif /* _ASM_SPINLOCK_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/spinlock_types.h b/target/linux/realtek/files/arch/rlx/include/asm/spinlock_types.h new file mode 100644 index 
000000000..adeedaa11 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/spinlock_types.h @@ -0,0 +1,25 @@ +#ifndef _ASM_SPINLOCK_TYPES_H +#define _ASM_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +typedef struct { + /* + * bits 0..13: serving_now + * bits 14 : junk data + * bits 15..28: ticket + */ + unsigned int lock; +} raw_spinlock_t; + +#define __RAW_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile unsigned int lock; +} raw_rwlock_t; + +#define __RAW_RW_LOCK_UNLOCKED { 0 } + +#endif diff --git a/target/linux/realtek/files/arch/rlx/include/asm/stackframe.h b/target/linux/realtek/files/arch/rlx/include/asm/stackframe.h new file mode 100644 index 000000000..c7afbe04b --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/stackframe.h @@ -0,0 +1,459 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle + * Copyright (C) 1994, 1995, 1996 Paul M. Antoine. + * Copyright (C) 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_STACKFRAME_H +#define _ASM_STACKFRAME_H + +#include <linux/threads.h> + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/rlxregs.h> +#include <asm/asm-offsets.h> + +#define STATMASK 0x3f + + .macro SAVE_AT + .set push + .set noat + LONG_S $1, PT_R1(sp) + .set pop + .endm + + .macro SAVE_TEMP + mfhi v1 + LONG_S v1, PT_HI(sp) + mflo v1 + LONG_S v1, PT_LO(sp) + LONG_S $8, PT_R8(sp) + LONG_S $9, PT_R9(sp) + LONG_S $10, PT_R10(sp) + LONG_S $11, PT_R11(sp) + LONG_S $12, PT_R12(sp) + LONG_S $13, PT_R13(sp) + LONG_S $14, PT_R14(sp) + LONG_S $15, PT_R15(sp) + LONG_S $24, PT_R24(sp) + .endm + + .macro SAVE_STATIC + LONG_S $16, PT_R16(sp) + LONG_S $17, PT_R17(sp) + LONG_S $18, PT_R18(sp) + LONG_S $19, PT_R19(sp) + LONG_S $20, PT_R20(sp) + LONG_S $21, PT_R21(sp) + LONG_S $22, PT_R22(sp) + LONG_S $23, PT_R23(sp) + LONG_S $30, PT_R30(sp) + .endm + + /* + * Support RADIAX save and restore + */ +#ifdef CONFIG_CPU_HAS_RADIAX + .macro SAVE_RADIAX + .set push + .set at + mflxc0 $8, $0 + mflxc0 $9, $1 + mflxc0 $10, $2 + mfru $11, $0 + mfru $12, $1 + mfru $13, $2 + sw $8, PT_ESTATUS(sp) + sw $9, PT_ECAUSE(sp) + sw $10, PT_INTVEC(sp) + sw $11, PT_CBS0(sp) + sw $12, PT_CBS1(sp) + sw $13, PT_CBS2(sp) + mfru $8, $4 + mfru $9, $5 + mfru $10, $6 + mfru $11, $16 + mfru $12, $17 + mfru $13, $18 + mfru $14, $24 + sw $8, PT_CBE0(sp) + sw $9, PT_CBE1(sp) + sw $10, PT_CBE2(sp) + sw $11, PT_LPS0(sp) + sw $12, PT_LPE0(sp) + sw $13, PT_LPC0(sp) + sw $14, PT_MMD(sp) + mfa $8, $1 + mfa $9, $1, 8 + mfa $10, $2 + mfa $11, $2, 8 + sw $8, PT_M0LL(sp) + srl $12, $9,24 + sw $12, PT_M0LH(sp) + sw $10, PT_M0HL(sp) + srl $12, $11,24 + sw $12, PT_M0HH(sp) + mfa $8, $5 + mfa $9, $5,8 + mfa $10, $6 + mfa $11, $6,8 + sw $8, PT_M1LL(sp) + srl $12, $9,24 + sw $12, PT_M1LH(sp) + sw $10, PT_M1HL(sp) + srl $12, $11,24 + sw $12, PT_M1HH(sp) + mfa $8, $9 + mfa $9, $9, 8 + mfa $10, $10 + mfa $11, $10, 8 + sw $8, PT_M2LL(sp) + srl $12, $9, 24 + sw $12, PT_M2LH(sp) + sw $10, PT_M2HL(sp) + srl $12, $11, 24 + sw $12, PT_M2HH(sp) + mfa $8, $13 + mfa $9, $13, 8 + mfa $10, $14 + mfa $11, $14, 8 + sw $8, PT_M3LL(sp) + srl $12, $9, 24 + sw $12, PT_M3LH(sp) + sw $10, PT_M3HL(sp) + srl $12, $11, 24 + sw $12, PT_M3HH(sp) + .set pop + .endm + + .macro RESTORE_RADIAX + .set push + .set at + lw $8, PT_ESTASUS(sp) + lw $9, PT_ECAUSE(sp) + lw $10, 
PT_INTVEC(sp) + lw $11, PT_CBS0(sp) + lw $12, PT_CBS1(sp) + lw $13, PT_CBS2(sp) + mtlxc0 $8, $0 + mtlxc0 $10, $2 + mtru $11, $0 + mtru $12, $1 + mtru $13, $2 + lw $8, PT_CBE0(sp) + lw $9, PT_CBE1(sp) + lw $10, PT_CBE2(sp) + lw $11, PT_LPS0(sp) + lw $12, PT_LPE0(sp) + lw $13, PT_LPC0(sp) + lw $14, PT_MMD(sp) + mtru $8, $4 + mtru $9, $5 + mtru $10, $6 + mtru $11, $16 + mtru $12, $17 + mtru $13, $18 + mtru $14, $24 + lw $8, PT_M0LL(sp) + lw $9, PT_M0LH(sp) + lw $10, PT_M0HL(sp) + lw $11, PT_M0HH(sp) + mta2 $8, $1 + sll $12, $9, 24 + mta2.g $12, $1 + mta2 $10, $2 + sll $12, $11, 24 + mta2.g $12, $2 + lw $8, PT_M1LL(sp) + lw $9, PT_M1LH(sp) + lw $10, PT_M1HL(sp) + lw $11, PT_M1HH(sp) + mta2 $8, $5 + sll $12, $9, 24 + mta2.g $12, $5 + mta2 $10, $6 + sll $12, $11, 24 + mta2.g $12, $6 + lw $8, PT_M2LL(sp) + lw $9, PT_M2LH(sp) + lw $10, PT_M2HL(sp) + lw $11, PT_M2HH(sp) + mta2 $8, $9 + sll $12, $9, 24 + mta2.g $12, $9 + mta2 $10, $10 + sll $12, $11, 24 + mta2.g $12, $10 + lw $8, PT_M3LL(sp) + lw $9, PT_M3LH(sp) + lw $10, PT_M3HL(sp) + lw $11, PT_M3HH(sp) + mta2 $8, $13 + sll $12, $9, 24 + mta2.g $12, $13 + mta2 $10, $13 + sll $12, $11, 24 + mta2.g $12, $13 + .set pop + .endm +#endif + + .macro get_saved_sp /* Uniprocessor variation */ + lui k1, %hi(kernelsp) + LONG_L k1, %lo(kernelsp)(k1) + .endm + + .macro set_saved_sp stackp temp temp2 + LONG_S \stackp, kernelsp + .endm + + .macro SAVE_SP + .set push + .set noat + .set reorder + mfc0 k0, CP0_STATUS + sll k0, 3 /* extract cu0 bit */ + .set noreorder + bltz k0, 8f + move k1, sp + .set reorder + /* Called from user mode, new stack. */ + get_saved_sp +8: move k0, sp + PTR_SUBU sp, k1, PT_SIZE + LONG_S k0, PT_R29(sp) + .set pop + .endm + + .macro SAVE_SOME_BUT_SP + .set push + .set noat + .set reorder + LONG_S $3, PT_R3(sp) + LONG_S $0, PT_R0(sp) + mfc0 v1, CP0_STATUS + LONG_S $2, PT_R2(sp) + LONG_S v1, PT_STATUS(sp) + LONG_S $4, PT_R4(sp) + mfc0 v1, CP0_CAUSE + LONG_S $5, PT_R5(sp) + LONG_S v1, PT_CAUSE(sp) + LONG_S $6, PT_R6(sp) + MFC0 v1, CP0_EPC + LONG_S $7, PT_R7(sp) + LONG_S v1, PT_EPC(sp) + LONG_S $25, PT_R25(sp) + LONG_S $28, PT_R28(sp) + LONG_S $31, PT_R31(sp) + ori $28, sp, _THREAD_MASK + xori $28, _THREAD_MASK + .set pop + .endm + + .macro SAVE_ALL_BUT_SP + SAVE_SOME_BUT_SP + SAVE_AT + SAVE_TEMP + SAVE_STATIC +#ifdef CONFIG_CPU_HAS_RADIAX + SAVE_RADIAX +#endif + .endm + + .macro SAVE_SOME + .set push + .set noat + .set reorder + mfc0 k0, CP0_STATUS + sll k0, 3 /* extract cu0 bit */ + .set noreorder + bltz k0, 8f + move k1, sp + .set reorder + /* Called from user mode, new stack. 
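+	   The bltz above is taken when the cu0 bit (Status bit 28, shifted
+	   into the sign bit by the preceding "sll k0, 3") is already set,
+	   i.e. we trapped from kernel mode and keep the current sp in k1;
+	   otherwise we fall through and get_saved_sp loads kernelsp into
+	   k1 before the new frame is carved out below it.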
*/ + get_saved_sp +8: move k0, sp + PTR_SUBU sp, k1, PT_SIZE + LONG_S k0, PT_R29(sp) + LONG_S $3, PT_R3(sp) + /* + * You might think that you don't need to save $0, + * but the FPU emulator and gdb remote debug stub + * need it to operate correctly + */ + LONG_S $0, PT_R0(sp) + mfc0 v1, CP0_STATUS + LONG_S $2, PT_R2(sp) + LONG_S v1, PT_STATUS(sp) + LONG_S $4, PT_R4(sp) + mfc0 v1, CP0_CAUSE + LONG_S $5, PT_R5(sp) + LONG_S v1, PT_CAUSE(sp) + LONG_S $6, PT_R6(sp) + MFC0 v1, CP0_EPC + LONG_S $7, PT_R7(sp) + LONG_S v1, PT_EPC(sp) + LONG_S $25, PT_R25(sp) + LONG_S $28, PT_R28(sp) + LONG_S $31, PT_R31(sp) + ori $28, sp, _THREAD_MASK + xori $28, _THREAD_MASK + .set pop + .endm + + .macro SAVE_ALL + SAVE_SOME + SAVE_AT + SAVE_TEMP + SAVE_STATIC +#ifdef CONFIG_CPU_HAS_RADIAX + SAVE_RADIAX +#endif + .endm + + .macro RESTORE_AT + .set push + .set noat + LONG_L $1, PT_R1(sp) + .set pop + .endm + + .macro RESTORE_TEMP + LONG_L $24, PT_LO(sp) + mtlo $24 + LONG_L $24, PT_HI(sp) + mthi $24 + LONG_L $8, PT_R8(sp) + LONG_L $9, PT_R9(sp) + LONG_L $10, PT_R10(sp) + LONG_L $11, PT_R11(sp) + LONG_L $12, PT_R12(sp) + LONG_L $13, PT_R13(sp) + LONG_L $14, PT_R14(sp) + LONG_L $15, PT_R15(sp) + LONG_L $24, PT_R24(sp) + .endm + + .macro RESTORE_STATIC + LONG_L $16, PT_R16(sp) + LONG_L $17, PT_R17(sp) + LONG_L $18, PT_R18(sp) + LONG_L $19, PT_R19(sp) + LONG_L $20, PT_R20(sp) + LONG_L $21, PT_R21(sp) + LONG_L $22, PT_R22(sp) + LONG_L $23, PT_R23(sp) + LONG_L $30, PT_R30(sp) + .endm + + .macro RESTORE_SOME + .set push + .set reorder + .set noat + mfc0 a0, CP0_STATUS + li v1, 0xff00 + ori a0, STATMASK + xori a0, STATMASK + mtc0 a0, CP0_STATUS + and a0, v1 + LONG_L v0, PT_STATUS(sp) + nor v1, $0, v1 + and v0, v1 + or v0, a0 + mtc0 v0, CP0_STATUS + LONG_L $31, PT_R31(sp) + LONG_L $28, PT_R28(sp) + LONG_L $25, PT_R25(sp) + LONG_L $7, PT_R7(sp) + LONG_L $6, PT_R6(sp) + LONG_L $5, PT_R5(sp) + LONG_L $4, PT_R4(sp) + LONG_L $3, PT_R3(sp) + LONG_L $2, PT_R2(sp) + .set pop + .endm + + .macro RESTORE_SP_AND_RET + .set push + .set noreorder + LONG_L k0, PT_EPC(sp) + LONG_L sp, PT_R29(sp) + jr k0 + rfe + .set pop + .endm + + .macro RESTORE_SP + LONG_L sp, PT_R29(sp) + .endm + + .macro RESTORE_ALL +#ifdef CONFIG_CPU_HAS_RADIAX + RESTORE_RADIAX +#endif + RESTORE_TEMP + RESTORE_STATIC + RESTORE_AT + RESTORE_SOME + RESTORE_SP + .endm + + .macro RESTORE_ALL_AND_RET +#ifdef CONFIG_CPU_HAS_RADIAX + RESTORE_RADIAX +#endif + RESTORE_TEMP + RESTORE_STATIC + RESTORE_AT + RESTORE_SOME + RESTORE_SP_AND_RET + .endm + +/* + * Move to kernel mode and disable interrupts. + * Set cp0 enable bit as sign that we're running on the kernel stack + */ + .macro CLI + mfc0 t0, CP0_STATUS + li t1, ST0_CU0 | STATMASK + or t0, t1 + xori t0, STATMASK + mtc0 t0, CP0_STATUS + irq_disable_hazard + .endm + +/* + * Move to kernel mode and enable interrupts. + * Set cp0 enable bit as sign that we're running on the kernel stack + */ + .macro STI + mfc0 t0, CP0_STATUS + li t1, ST0_CU0 | STATMASK + or t0, t1 + xori t0, STATMASK & ~1 + mtc0 t0, CP0_STATUS + irq_enable_hazard + .endm + +/* + * Just move to kernel mode and leave interrupts as they are. Note + * for the R3000 this means copying the previous enable from IEp. 
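+ * (In the macro below that is the "andi t2, t0, ST0_IEP", "srl t2, 2",
+ * "or t0, t2" sequence: IEp sits in Status bit 2 and IEc in bit 0 on an
+ * R3000-style status register, so the shift by two copies the previous
+ * interrupt enable into the current one.)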
+ * Set cp0 enable bit as sign that we're running on the kernel stack + */ + .macro KMODE + mfc0 t0, CP0_STATUS + li t1, ST0_CU0 | (STATMASK & ~1) + andi t2, t0, ST0_IEP + srl t2, 2 + or t0, t2 + or t0, t1 + xori t0, STATMASK & ~1 + mtc0 t0, CP0_STATUS + irq_disable_hazard + .endm + +#endif /* _ASM_STACKFRAME_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/stacktrace.h b/target/linux/realtek/files/arch/rlx/include/asm/stacktrace.h new file mode 100644 index 000000000..49ecb2c0c --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/stacktrace.h @@ -0,0 +1,41 @@ +#ifndef _ASM_STACKTRACE_H +#define _ASM_STACKTRACE_H + +#include <asm/ptrace.h> + +#ifdef CONFIG_KALLSYMS +extern int raw_show_trace; +extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, + unsigned long pc, unsigned long *ra); +#else +#define raw_show_trace 1 +static inline unsigned long unwind_stack(struct task_struct *task, + unsigned long *sp, unsigned long pc, unsigned long *ra) +{ + return 0; +} +#endif + +static __always_inline void prepare_frametrace(struct pt_regs *regs) +{ +#ifndef CONFIG_KALLSYMS + /* + * Remove any garbage that may be in regs (specially func + * addresses) to avoid show_raw_backtrace() to report them + */ + memset(regs, 0, sizeof(*regs)); +#endif + __asm__ __volatile__( + ".set push\n\t" + ".set noat\n\t" + "1: la $1, 1b\n\t" + "sw $1, %0\n\t" + "sw $29, %1\n\t" + "sw $31, %2\n\t" + ".set pop\n\t" + : "=m" (regs->cp0_epc), + "=m" (regs->regs[29]), "=m" (regs->regs[31]) + : : "memory"); +} + +#endif /* _ASM_STACKTRACE_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/stat.h b/target/linux/realtek/files/arch/rlx/include/asm/stat.h new file mode 100644 index 000000000..ba1be8da6 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/stat.h @@ -0,0 +1,87 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1999, 2000 Ralf Baechle + * Copyright (C) 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_STAT_H +#define _ASM_STAT_H + +#include <linux/types.h> + +#include <asm/sgidefs.h> + +struct stat { + unsigned st_dev; + long st_pad1[3]; /* Reserved for network id */ + ino_t st_ino; + mode_t st_mode; + nlink_t st_nlink; + uid_t st_uid; + gid_t st_gid; + unsigned st_rdev; + long st_pad2[2]; + off_t st_size; + long st_pad3; + /* + * Actually this should be timestruc_t st_atime, st_mtime and st_ctime + * but we don't have it under Linux. + */ + time_t st_atime; + long st_atime_nsec; + time_t st_mtime; + long st_mtime_nsec; + time_t st_ctime; + long st_ctime_nsec; + long st_blksize; + long st_blocks; + long st_pad4[14]; +}; + +/* + * This matches struct stat64 in glibc2.1, hence the absolutely insane + * amounts of padding around dev_t's. The memory layout is the same as of + * struct stat of the 64-bit kernel. + */ + +struct stat64 { + unsigned long st_dev; + unsigned long st_pad0[3]; /* Reserved for st_dev expansion */ + + unsigned long long st_ino; + + mode_t st_mode; + nlink_t st_nlink; + + uid_t st_uid; + gid_t st_gid; + + unsigned long st_rdev; + unsigned long st_pad1[3]; /* Reserved for st_rdev expansion */ + + long long st_size; + + /* + * Actually this should be timestruc_t st_atime, st_mtime and st_ctime + * but we don't have it under Linux. 
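+ * (timestruc_t is the IRIX/SVR4 seconds-plus-nanoseconds pair, which is
+ * why each time_t member below is paired with a separate st_*_nsec
+ * field instead.)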
+ */ + time_t st_atime; + unsigned long st_atime_nsec; /* Reserved for st_atime expansion */ + + time_t st_mtime; + unsigned long st_mtime_nsec; /* Reserved for st_mtime expansion */ + + time_t st_ctime; + unsigned long st_ctime_nsec; /* Reserved for st_ctime expansion */ + + unsigned long st_blksize; + unsigned long st_pad2; + + long long st_blocks; +}; + +#define STAT_HAVE_NSEC 1 + +#endif /* _ASM_STAT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/statfs.h b/target/linux/realtek/files/arch/rlx/include/asm/statfs.h new file mode 100644 index 000000000..f38196201 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/statfs.h @@ -0,0 +1,57 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1999 by Ralf Baechle + */ +#ifndef _ASM_STATFS_H +#define _ASM_STATFS_H + +#include <linux/posix_types.h> +#include <asm/sgidefs.h> + +#ifndef __KERNEL_STRICT_NAMES + +#include <linux/types.h> + +typedef __kernel_fsid_t fsid_t; + +#endif + +struct statfs { + long f_type; +#define f_fstyp f_type + long f_bsize; + long f_frsize; /* Fragment size - unsupported */ + long f_blocks; + long f_bfree; + long f_files; + long f_ffree; + long f_bavail; + + /* Linux specials */ + __kernel_fsid_t f_fsid; + long f_namelen; + long f_spare[6]; +}; + +/* + * Unlike the traditional version the LFAPI version has none of the ABI junk + */ +struct statfs64 { + __u32 f_type; + __u32 f_bsize; + __u32 f_frsize; /* Fragment size - unsupported */ + __u32 __pad; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_files; + __u64 f_ffree; + __u64 f_bavail; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_spare[6]; +}; + +#endif /* _ASM_STATFS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/string.h b/target/linux/realtek/files/arch/rlx/include/asm/string.h new file mode 100644 index 000000000..a25225fda --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/string.h @@ -0,0 +1,140 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1994, 95, 96, 97, 98, 2000, 01 Ralf Baechle + * Copyright (c) 2000 by Silicon Graphics, Inc. + * Copyright (c) 2001 MIPS Technologies, Inc. + */ +#ifndef _ASM_STRING_H +#define _ASM_STRING_H + + +/* + * Most of the inline functions are rather naive implementations so I just + * didn't bother updating them for 64-bit ... 
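+ *
+ * For reference, the strcpy() below is simply the byte-copy loop
+ *
+ *	do {
+ *		c = *src++;
+ *		*dst++ = c;
+ *	} while (c != '\0');
+ *
+ * hand-scheduled so the pointer increments land in the load and branch
+ * delay slots.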
+ */ +#ifndef IN_STRING_C + +#define __HAVE_ARCH_STRCPY +static __inline__ char *strcpy(char *__dest, __const__ char *__src) +{ + char *__xdest = __dest; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n" + "1:\tlbu\t$1,(%1)\n\t" + "addiu\t%1,1\n\t" + "sb\t$1,(%0)\n\t" + "bnez\t$1,1b\n\t" + "addiu\t%0,1\n\t" + ".set\tat\n\t" + ".set\treorder" + : "=r" (__dest), "=r" (__src) + : "0" (__dest), "1" (__src) + : "memory"); + + return __xdest; +} + +#define __HAVE_ARCH_STRNCPY +static __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n) +{ + char *__xdest = __dest; + + if (__n == 0) + return __xdest; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n" + "1:\tlbu\t$1,(%1)\n\t" + "subu\t%2,1\n\t" + "sb\t$1,(%0)\n\t" + "beqz\t$1,2f\n\t" + "addiu\t%0,1\n\t" + "bnez\t%2,1b\n\t" + "addiu\t%1,1\n" + "2:\n\t" + ".set\tat\n\t" + ".set\treorder" + : "=r" (__dest), "=r" (__src), "=r" (__n) + : "0" (__dest), "1" (__src), "2" (__n) + : "memory"); + + return __xdest; +} + +#define __HAVE_ARCH_STRCMP +static __inline__ int strcmp(__const__ char *__cs, __const__ char *__ct) +{ + int __res; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n\t" + "lbu\t%2,(%0)\n" + "1:\tlbu\t$1,(%1)\n\t" + "addiu\t%0,1\n\t" + "bne\t$1,%2,2f\n\t" + "addiu\t%1,1\n\t" + "bnez\t%2,1b\n\t" + "lbu\t%2,(%0)\n\t" +#if defined(CONFIG_CPU_RLX4180) || defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) + "nop\n\t" +#endif + "move\t%2,$1\n" + "2:\tsubu\t%2,$1\n" + "3:\t.set\tat\n\t" + ".set\treorder" + : "=r" (__cs), "=r" (__ct), "=r" (__res) + : "0" (__cs), "1" (__ct), "m" (*__cs), "m" (*__ct)); + + return __res; +} + +#endif /* !defined(IN_STRING_C) */ + +#define __HAVE_ARCH_STRNCMP +static __inline__ int +strncmp(__const__ char *__cs, __const__ char *__ct, size_t __count) +{ + int __res; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + ".set\tnoat\n" + "1:\tlbu\t%3,(%0)\n\t" + "beqz\t%2,2f\n\t" + "lbu\t$1,(%1)\n\t" + "subu\t%2,1\n\t" + "bne\t$1,%3,3f\n\t" + "addiu\t%0,1\n\t" + "bnez\t%3,1b\n\t" + "addiu\t%1,1\n" + "2:\n\t" +#if defined(CONFIG_CPU_RLX4180) || defined(CONFIG_CPU_RLX4181) || defined(CONFIG_CPU_RLX5181) + "nop\n\t" +#endif + "move\t%3,$1\n" + "3:\tsubu\t%3,$1\n\t" + ".set\tat\n\t" + ".set\treorder" + : "=r" (__cs), "=r" (__ct), "=r" (__count), "=r" (__res) + : "0" (__cs), "1" (__ct), "2" (__count), "m" (*__cs), "m" (*__ct)); + + return __res; +} + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *__s, int __c, size_t __count); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *__dest, __const__ void *__src, size_t __n); + +#endif /* _ASM_STRING_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/suspend.h b/target/linux/realtek/files/arch/rlx/include/asm/suspend.h new file mode 100644 index 000000000..2562f8f9b --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/suspend.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SUSPEND_H +#define __ASM_SUSPEND_H + +/* Somewhen... Maybe :-) */ + +#endif /* __ASM_SUSPEND_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/swab.h b/target/linux/realtek/files/arch/rlx/include/asm/swab.h new file mode 100644 index 000000000..d5b3f9048 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/swab.h @@ -0,0 +1,16 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 99, 2003 by Ralf Baechle + */ +#ifndef _ASM_SWAB_H +#define _ASM_SWAB_H + +#include <linux/compiler.h> +#include <linux/types.h> + +#define __SWAB_64_THRU_32__ + +#endif /* _ASM_SWAB_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/sysmips.h b/target/linux/realtek/files/arch/rlx/include/asm/sysmips.h new file mode 100644 index 000000000..4f47b7d6a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/sysmips.h @@ -0,0 +1,25 @@ +/* + * Definitions for the MIPS sysmips(2) call + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995 by Ralf Baechle + */ +#ifndef _ASM_SYSMIPS_H +#define _ASM_SYSMIPS_H + +/* + * Commands for the sysmips(2) call + * + * sysmips(2) is deprecated - though some existing software uses it. + * We only support the following commands. + */ +#define SETNAME 1 /* set hostname */ +#define FLUSH_CACHE 3 /* writeback and invalidate caches */ +#define MIPS_FIXADE 7 /* control address error fixing */ +#define MIPS_RDNVRAM 10 /* read NVRAM */ +#define MIPS_ATOMIC_SET 2001 /* atomically set variable */ + +#endif /* _ASM_SYSMIPS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/system.h b/target/linux/realtek/files/arch/rlx/include/asm/system.h new file mode 100644 index 000000000..7ca9747e0 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/system.h @@ -0,0 +1,106 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle + * Copyright (C) 1996 by Paul M. Antoine + * Copyright (C) 1999 Silicon Graphics + * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. + */ +#ifndef _ASM_SYSTEM_H +#define _ASM_SYSTEM_H + +#include <linux/types.h> +#include <linux/irqflags.h> +#include <linux/linkage.h> + +#include <asm/addrspace.h> +#include <asm/barrier.h> +#include <asm/cmpxchg.h> +#include <asm/cpu-features.h> + + +/* + * switch_to(n) should switch tasks to task nr n, first + * checking that n isn't the current task, in which case it does nothing. + */ +extern asmlinkage void *resume(void *last, void *next, void *next_ti); + +struct task_struct; + +#define switch_to(prev, next, last) \ +do { \ + (last) = resume(prev, next, task_thread_info(next)); \ +} while (0) + + +static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) +{ + __u32 retval; + +#ifdef CONFIG_CPU_HAS_LLSC + unsigned long dummy; + + __asm__ __volatile__( + "1: ll %0, %3 # xchg_u32 \n" + " move %2, %z4 \n" + " sc %2, %1 \n" + " beqz %2, 2f \n" + " .subsection 2 \n" + "2: b 1b \n" + " .previous \n" + : "=&r" (retval), "=m" (*m), "=&r" (dummy) + : "R" (*m), "Jr" (val) + : "memory"); +#else + unsigned long flags; + + raw_local_irq_save(flags); + retval = *m; + *m = val; + raw_local_irq_restore(flags); /* implies memory barrier */ +#endif + + smp_llsc_mb(); + return retval; +} + +extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val); +#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid xchg(). 
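+   For example, xchg() on a u16 matches neither the 4- nor the 8-byte
+   case in __xchg() below, so the compiler emits a call to this
+   declared-but-never-defined function and the build breaks at link time
+   instead of silently performing a non-atomic exchange.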
*/ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +{ + switch (size) { + case 4: + return __xchg_u32(ptr, x); + case 8: + return __xchg_u64(ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) + +extern void set_handler(unsigned long offset, void *addr, unsigned long len); +extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); + +extern void *set_except_vector(int n, void *addr); +extern unsigned long ebase; +extern void per_cpu_trap_init(void); + +/* + * See include/asm-ia64/system.h; prevents deadlock on SMP + * systems. + */ +#define __ARCH_WANT_UNLOCKED_CTXSW + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* _ASM_SYSTEM_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/termbits.h b/target/linux/realtek/files/arch/rlx/include/asm/termbits.h new file mode 100644 index 000000000..c83c68444 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/termbits.h @@ -0,0 +1,226 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 99, 2001, 06 Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#ifndef _ASM_TERMBITS_H +#define _ASM_TERMBITS_H + +#include <linux/posix_types.h> + +typedef unsigned char cc_t; +typedef unsigned int speed_t; +typedef unsigned int tcflag_t; + +/* + * The ABI says nothing about NCC but seems to use NCCS as + * replacement for it in struct termio + */ +#define NCCS 23 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ +}; + +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define VINTR 0 /* Interrupt character [ISIG]. */ +#define VQUIT 1 /* Quit character [ISIG]. */ +#define VERASE 2 /* Erase character [ICANON]. */ +#define VKILL 3 /* Kill-line character [ICANON]. */ +#define VMIN 4 /* Minimum number of bytes read at once [!ICANON]. */ +#define VTIME 5 /* Time-out value (tenths of a second) [!ICANON]. */ +#define VEOL2 6 /* Second EOL character [ICANON]. */ +#define VSWTC 7 /* ??? */ +#define VSWTCH VSWTC +#define VSTART 8 /* Start (X-ON) character [IXON, IXOFF]. */ +#define VSTOP 9 /* Stop (X-OFF) character [IXON, IXOFF]. */ +#define VSUSP 10 /* Suspend character [ISIG]. */ +#if 0 +/* + * VDSUSP is not supported + */ +#define VDSUSP 11 /* Delayed suspend character [ISIG]. 
*/ +#endif +#define VREPRINT 12 /* Reprint-line character [ICANON]. */ +#define VDISCARD 13 /* Discard character [IEXTEN]. */ +#define VWERASE 14 /* Word-erase character [ICANON]. */ +#define VLNEXT 15 /* Literal-next character [IEXTEN]. */ +#define VEOF 16 /* End-of-file character [ICANON]. */ +#define VEOL 17 /* End-of-line character [ICANON]. */ + +/* c_iflag bits */ +#define IGNBRK 0000001 /* Ignore break condition. */ +#define BRKINT 0000002 /* Signal interrupt on break. */ +#define IGNPAR 0000004 /* Ignore characters with parity errors. */ +#define PARMRK 0000010 /* Mark parity and framing errors. */ +#define INPCK 0000020 /* Enable input parity check. */ +#define ISTRIP 0000040 /* Strip 8th bit off characters. */ +#define INLCR 0000100 /* Map NL to CR on input. */ +#define IGNCR 0000200 /* Ignore CR. */ +#define ICRNL 0000400 /* Map CR to NL on input. */ +#define IUCLC 0001000 /* Map upper case to lower case on input. */ +#define IXON 0002000 /* Enable start/stop output control. */ +#define IXANY 0004000 /* Any character will restart after stop. */ +#define IXOFF 0010000 /* Enable start/stop input control. */ +#define IMAXBEL 0020000 /* Ring bell when input queue is full. */ +#define IUTF8 0040000 /* Input is UTF-8 */ + +/* c_oflag bits */ +#define OPOST 0000001 /* Perform output processing. */ +#define OLCUC 0000002 /* Map lower case to upper case on output. */ +#define ONLCR 0000004 /* Map NL to CR-NL on output. */ +#define OCRNL 0000010 +#define ONOCR 0000020 +#define ONLRET 0000040 +#define OFILL 0000100 +#define OFDEL 0000200 +#define NLDLY 0000400 +#define NL0 0000000 +#define NL1 0000400 +#define CRDLY 0003000 +#define CR0 0000000 +#define CR1 0001000 +#define CR2 0002000 +#define CR3 0003000 +#define TABDLY 0014000 +#define TAB0 0000000 +#define TAB1 0004000 +#define TAB2 0010000 +#define TAB3 0014000 +#define XTABS 0014000 +#define BSDLY 0020000 +#define BS0 0000000 +#define BS1 0020000 +#define VTDLY 0040000 +#define VT0 0000000 +#define VT1 0040000 +#define FFDLY 0100000 +#define FF0 0000000 +#define FF1 0100000 +/* +#define PAGEOUT ??? +#define WRAP ??? + */ + +/* c_cflag bit meaning */ +#define CBAUD 0010017 +#define B0 0000000 /* hang up */ +#define B50 0000001 +#define B75 0000002 +#define B110 0000003 +#define B134 0000004 +#define B150 0000005 +#define B200 0000006 +#define B300 0000007 +#define B600 0000010 +#define B1200 0000011 +#define B1800 0000012 +#define B2400 0000013 +#define B4800 0000014 +#define B9600 0000015 +#define B19200 0000016 +#define B38400 0000017 +#define EXTA B19200 +#define EXTB B38400 +#define CSIZE 0000060 /* Number of bits per byte (mask). */ +#define CS5 0000000 /* 5 bits per byte. */ +#define CS6 0000020 /* 6 bits per byte. */ +#define CS7 0000040 /* 7 bits per byte. */ +#define CS8 0000060 /* 8 bits per byte. */ +#define CSTOPB 0000100 /* Two stop bits instead of one. */ +#define CREAD 0000200 /* Enable receiver. */ +#define PARENB 0000400 /* Parity enable. */ +#define PARODD 0001000 /* Odd parity instead of even. */ +#define HUPCL 0002000 /* Hang up on last close. */ +#define CLOCAL 0004000 /* Ignore modem status lines. 
*/ +#define CBAUDEX 0010000 +#define BOTHER 0010000 +#define B57600 0010001 +#define B115200 0010002 +#define B230400 0010003 +#define B460800 0010004 +#define B500000 0010005 +#define B576000 0010006 +#define B921600 0010007 +#define B1000000 0010010 +#define B1152000 0010011 +#define B1500000 0010012 +#define B2000000 0010013 +#define B2500000 0010014 +#define B3000000 0010015 +#define B3500000 0010016 +#define B4000000 0010017 +#define CIBAUD 002003600000 /* input baud rate */ +#define CMSPAR 010000000000 /* mark or space (stick) parity */ +#define CRTSCTS 020000000000 /* flow control */ + +#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ + +/* c_lflag bits */ +#define ISIG 0000001 /* Enable signals. */ +#define ICANON 0000002 /* Do erase and kill processing. */ +#define XCASE 0000004 +#define ECHO 0000010 /* Enable echo. */ +#define ECHOE 0000020 /* Visual erase for ERASE. */ +#define ECHOK 0000040 /* Echo NL after KILL. */ +#define ECHONL 0000100 /* Echo NL even if ECHO is off. */ +#define NOFLSH 0000200 /* Disable flush after interrupt. */ +#define IEXTEN 0000400 /* Enable DISCARD and LNEXT. */ +#define ECHOCTL 0001000 /* Echo control characters as ^X. */ +#define ECHOPRT 0002000 /* Hardcopy visual erase. */ +#define ECHOKE 0004000 /* Visual erase for KILL. */ +#define FLUSHO 0020000 +#define PENDIN 0040000 /* Retype pending input (state). */ +#define TOSTOP 0100000 /* Send SIGTTOU for background output. */ +#define ITOSTOP TOSTOP + +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ + +/* tcflow() and TCXONC use these */ +#define TCOOFF 0 /* Suspend output. */ +#define TCOON 1 /* Restart suspended output. */ +#define TCIOFF 2 /* Send a STOP character. */ +#define TCION 3 /* Send a START character. */ + +/* tcflush() and TCFLSH use these */ +#define TCIFLUSH 0 /* Discard data received but not yet read. */ +#define TCOFLUSH 1 /* Discard data written but not yet sent. */ +#define TCIOFLUSH 2 /* Discard all pending data. */ + +/* tcsetattr uses these */ +#define TCSANOW TCSETS /* Change immediately. */ +#define TCSADRAIN TCSETSW /* Change when pending output is written. */ +#define TCSAFLUSH TCSETSF /* Flush pending input before changing. */ + +#endif /* _ASM_TERMBITS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/termios.h b/target/linux/realtek/files/arch/rlx/include/asm/termios.h new file mode 100644 index 000000000..8f77f774a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/termios.h @@ -0,0 +1,176 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1996, 2000, 2001 by Ralf Baechle + * Copyright (C) 2000, 2001 Silicon Graphics, Inc. 
+ */ +#ifndef _ASM_TERMIOS_H +#define _ASM_TERMIOS_H + +#include <linux/errno.h> +#include <asm/termbits.h> +#include <asm/ioctls.h> + +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + int sg_flags; /* SGI special - int, not short */ +}; + +struct tchars { + char t_intrc; + char t_quitc; + char t_startc; + char t_stopc; + char t_eofc; + char t_brkc; +}; + +struct ltchars { + char t_suspc; /* stop process signal */ + char t_dsuspc; /* delayed stop process signal */ + char t_rprntc; /* reprint line */ + char t_flushc; /* flush output (toggles) */ + char t_werasc; /* word erase */ + char t_lnextc; /* literal next character */ +}; + +/* TIOCGSIZE, TIOCSSIZE not defined yet. Only needed for SunOS source + compatibility anyway ... */ + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + char c_line; /* line discipline */ + unsigned char c_cc[NCCS]; /* control characters */ +}; + +#ifdef __KERNEL__ +#include <linux/module.h> + +/* + * intr=^C quit=^\ erase=del kill=^U + * vmin=\1 vtime=\0 eol2=\0 swtc=\0 + * start=^Q stop=^S susp=^Z vdsusp= + * reprint=^R discard=^U werase=^W lnext=^V + * eof=^D eol=\0 + */ +#define INIT_C_CC "\003\034\177\025\1\0\0\0\021\023\032\0\022\017\027\026\004\0" +#endif + +/* modem lines */ +#define TIOCM_LE 0x001 /* line enable */ +#define TIOCM_DTR 0x002 /* data terminal ready */ +#define TIOCM_RTS 0x004 /* request to send */ +#define TIOCM_ST 0x010 /* secondary transmit */ +#define TIOCM_SR 0x020 /* secondary receive */ +#define TIOCM_CTS 0x040 /* clear to send */ +#define TIOCM_CAR 0x100 /* carrier detect */ +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RNG 0x200 /* ring */ +#define TIOCM_RI TIOCM_RNG +#define TIOCM_DSR 0x400 /* data set ready */ +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +#ifdef __KERNEL__ + +#include <linux/string.h> + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + struct termio __user *termio) +{ + unsigned short iflag, oflag, cflag, lflag; + unsigned int err; + + if (!access_ok(VERIFY_READ, termio, sizeof(struct termio))) + return -EFAULT; + + err = __get_user(iflag, &termio->c_iflag); + termios->c_iflag = (termios->c_iflag & 0xffff0000) | iflag; + err |=__get_user(oflag, &termio->c_oflag); + termios->c_oflag = (termios->c_oflag & 0xffff0000) | oflag; + err |=__get_user(cflag, &termio->c_cflag); + termios->c_cflag = (termios->c_cflag & 0xffff0000) | cflag; + err |=__get_user(lflag, &termio->c_lflag); + termios->c_lflag = (termios->c_lflag & 0xffff0000) | lflag; + err |=__get_user(termios->c_line, &termio->c_line); + if (err) + return -EFAULT; + + if (__copy_from_user(termios->c_cc, termio->c_cc, NCC)) + return -EFAULT; + + return 0; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. 
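+ * Only the low 16 bits of each flag word fit into struct termio's
+ * unsigned shorts, so the upper halves are silently truncated below.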
+ */ +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + int err; + + if (!access_ok(VERIFY_WRITE, termio, sizeof(struct termio))) + return -EFAULT; + + err = __put_user(termios->c_iflag, &termio->c_iflag); + err |= __put_user(termios->c_oflag, &termio->c_oflag); + err |= __put_user(termios->c_cflag, &termio->c_cflag); + err |= __put_user(termios->c_lflag, &termio->c_lflag); + err |= __put_user(termios->c_line, &termio->c_line); + if (err) + return -EFAULT; + + if (__copy_to_user(termio->c_cc, termios->c_cc, NCC)) + return -EFAULT; + + return 0; +} + +static inline int user_termios_to_kernel_termios(struct ktermios __user *k, + struct termios2 *u) +{ + return copy_from_user(k, u, sizeof(struct termios2)) ? -EFAULT : 0; +} + +static inline int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios2)) ? -EFAULT : 0; +} + +static inline int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)) ? -EFAULT : 0; +} + +static inline int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)) ? -EFAULT : 0; +} + +#endif /* defined(__KERNEL__) */ + +#endif /* _ASM_TERMIOS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/thread_info.h b/target/linux/realtek/files/arch/rlx/include/asm/thread_info.h new file mode 100644 index 000000000..597ba530f --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/thread_info.h @@ -0,0 +1,134 @@ +/* thread_info.h: MIPS low-level thread information + * + * Copyright (C) 2002 David Howells (dhowells@redhat.com) + * - Incorporating suggestions made by Linus Torvalds and Dave Miller + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#ifdef __KERNEL__ + + +#ifndef __ASSEMBLY__ + +#include <asm/processor.h> + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants + * must also be changed + */ +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + unsigned long tp_value; /* thread pointer */ + __u32 cpu; /* current CPU */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ + + mm_segment_t addr_limit; /* thread address space: + 0-0xBFFFFFFF for user-thead + 0-0xFFFFFFFF for kernel-thread + */ + struct restart_block restart_block; + struct pt_regs *regs; +}; + +/* + * macros/functions for gaining access to the thread information structure + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = _TIF_FIXADE, \ + .cpu = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* How to get the thread information struct from C. 
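+   The kernel dedicates $28 (the MIPS gp register) to the current
+   thread_info pointer, so this is a single register read.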
*/ +register struct thread_info *__current_thread_info __asm__("$28"); +#define current_thread_info() __current_thread_info + +/* thread information allocation */ +#ifdef CONFIG_KERNEL_STACK_SIZE_ORDER +#define THREAD_SIZE_ORDER (CONFIG_KERNEL_STACK_SIZE_ORDER) +#else +#define THREAD_SIZE_ORDER (1) +#endif +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +#define THREAD_MASK (THREAD_SIZE - 1UL) + +#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR + +#ifdef CONFIG_DEBUG_STACK_USAGE +#define alloc_thread_info(tsk) \ +({ \ + struct thread_info *ret; \ + \ + ret = kzalloc(THREAD_SIZE, GFP_KERNEL); \ + \ + ret; \ +}) +#else +#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) +#endif + +#define free_thread_info(info) kfree(info) + +#endif /* !__ASSEMBLY__ */ + +#define PREEMPT_ACTIVE 0x10000000 + +/* + * thread information flags + * - these are process state flags that various assembly files may need to + * access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ +#define TIF_SECCOMP 4 /* secure computing */ +#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ +#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_MEMDIE 18 +#define TIF_FREEZE 19 +#define TIF_FIXADE 20 /* Fix address errors in software */ +#define TIF_LOGADE 21 /* Log address errors to syslog */ +#define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */ +#define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */ +#define TIF_SYSCALL_TRACE 31 /* syscall trace active */ + +#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) +#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) +#define _TIF_SECCOMP (1<<TIF_SECCOMP) +#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) +#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) +#define _TIF_FREEZE (1<<TIF_FREEZE) +#define _TIF_FIXADE (1<<TIF_FIXADE) +#define _TIF_LOGADE (1<<TIF_LOGADE) +#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS) +#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR) + +/* work to do on interrupt/exception return */ +#define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP) +/* work to do on any return to u-space */ +#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) + +#endif /* __KERNEL__ */ + +#endif /* _ASM_THREAD_INFO_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/time.h b/target/linux/realtek/files/arch/rlx/include/asm/time.h new file mode 100644 index 000000000..03f1a9c22 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/time.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2001, 2002, MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * Copyright (c) 2003 Maciej W. Rozycki + * + * include/asm-mips/time.h + * header file for the new style time.c file and time services. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef _ASM_TIME_H +#define _ASM_TIME_H + +#include <linux/rtc.h> +#include <linux/spinlock.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> + +extern spinlock_t rtc_lock; + +/* + * RTC ops. 
By default, they point to weak no-op RTC functions. + * rtc_mips_set_time - reverse the above translation and set time to RTC. + * rtc_mips_set_mmss - similar to rtc_set_time, but only min and sec need + * to be set. Used by RTC sync-up. + */ +extern int rtc_mips_set_time(unsigned long); +extern int rtc_mips_set_mmss(unsigned long); + +/* + * board specific routines required by time_init(). + */ +extern void bsp_time_init(void); + +extern void clocksource_set_clock(struct clocksource *cs, unsigned int clock); +extern void clockevent_set_clock(struct clock_event_device *cd, + unsigned int clock); + +#endif /* _ASM_TIME_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/timex.h b/target/linux/realtek/files/arch/rlx/include/asm/timex.h new file mode 100644 index 000000000..98cd9f885 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/timex.h @@ -0,0 +1,43 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998, 1999, 2003 by Ralf Baechle + */ +#ifndef _ASM_TIMEX_H +#define _ASM_TIMEX_H + +#ifdef __KERNEL__ + +#include <asm/rlxregs.h> + +/* + * This is the clock rate of the i8253 PIT. A MIPS system may not have + * a PIT by the symbol is used all over the kernel including some APIs. + * So keeping it defined to the number for the PIT is the only sane thing + * for now. + */ +#define CLOCK_TICK_RATE 1193182 + +/* + * Standard way to access the cycle counter. + * Currently only used on SMP for scheduling. + * + * Only the low 32 bits are available as a continuously counting entity. + * But this only means we'll force a reschedule every 8 seconds or so, + * which isn't an evil thing. + * + * We know that all SMP capable CPUs have cycle counters. + */ + +typedef unsigned int cycles_t; + +static inline cycles_t get_cycles(void) +{ + return 0; +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_TIMEX_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/tlb.h b/target/linux/realtek/files/arch/rlx/include/asm/tlb.h new file mode 100644 index 000000000..80d9dfcf1 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/tlb.h @@ -0,0 +1,23 @@ +#ifndef __ASM_TLB_H +#define __ASM_TLB_H + +/* + * MIPS doesn't need any special per-pte or per-vma handling, except + * we need to flush cache for area to be unmapped. + */ +#define tlb_start_vma(tlb, vma) \ + do { \ + if (!tlb->fullmm) \ + flush_cache_range(vma, vma->vm_start, vma->vm_end); \ + } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) + +/* + * .. because we flush the whole mm when it fills up. + */ +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include <asm-generic/tlb.h> + +#endif /* __ASM_TLB_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/tlbdebug.h b/target/linux/realtek/files/arch/rlx/include/asm/tlbdebug.h new file mode 100644 index 000000000..bb8f5c29c --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/tlbdebug.h @@ -0,0 +1,16 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2002 by Ralf Baechle + */ +#ifndef __ASM_TLBDEBUG_H +#define __ASM_TLBDEBUG_H + +/* + * TLB debugging functions: + */ +extern void dump_tlb_all(void); + +#endif /* __ASM_TLBDEBUG_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/tlbflush.h b/target/linux/realtek/files/arch/rlx/include/asm/tlbflush.h new file mode 100644 index 000000000..8d6b12341 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/tlbflush.h @@ -0,0 +1,33 @@ +#ifndef __ASM_TLBFLUSH_H +#define __ASM_TLBFLUSH_H + +#include <linux/mm.h> + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void local_flush_tlb_all(void); +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void local_flush_tlb_kernel_range(unsigned long start, + unsigned long end); +extern void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long page); +extern void local_flush_tlb_one(unsigned long vaddr); + +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end) +#define flush_tlb_kernel_range(vmaddr,end) \ + local_flush_tlb_kernel_range(vmaddr, end) +#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) +#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr) + +#endif /* __ASM_TLBFLUSH_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/topology.h b/target/linux/realtek/files/arch/rlx/include/asm/topology.h new file mode 100644 index 000000000..20ea4859c --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/topology.h @@ -0,0 +1,13 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 by Ralf Baechle + */ +#ifndef __ASM_TOPOLOGY_H +#define __ASM_TOPOLOGY_H + +#include <topology.h> + +#endif /* __ASM_TOPOLOGY_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/traps.h b/target/linux/realtek/files/arch/rlx/include/asm/traps.h new file mode 100644 index 000000000..63f374114 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/traps.h @@ -0,0 +1,21 @@ +/* + * Trap handling definitions. + * + * Copyright (C) 2002, 2003 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_TRAPS_H +#define _ASM_TRAPS_H + +/* + * Possible status responses for a board_be_handler backend. 
+ */ +#define MIPS_BE_DISCARD 0 /* return with no action */ +#define MIPS_BE_FIXUP 1 /* return to the fixup code */ +#define MIPS_BE_FATAL 2 /* treat as an unrecoverable error */ + +#endif /* _ASM_TRAPS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/types.h b/target/linux/realtek/files/arch/rlx/include/asm/types.h new file mode 100644 index 000000000..e9059901f --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/types.h @@ -0,0 +1,47 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle + * Copyright (C) 2008 Wind River Systems, + * written by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + */ +#ifndef _ASM_TYPES_H +#define _ASM_TYPES_H + +/* + * We don't use int-l64.h for the kernel anymore but still use it for + * userspace to avoid code changes. + */ +#include <asm-generic/int-ll64.h> + +#ifndef __ASSEMBLY__ + +typedef unsigned short umode_t; + +#endif /* __ASSEMBLY__ */ + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +//#define BITS_PER_LONG _MIPS_SZLONG + +#ifndef __ASSEMBLY__ + +typedef u32 dma_addr_t; +typedef u64 dma64_addr_t; + +/* + * Don't use phys_t. You've been warned. + */ +typedef unsigned long phys_t; + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_TYPES_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/uaccess.h b/target/linux/realtek/files/arch/rlx/include/asm/uaccess.h new file mode 100644 index 000000000..064f4af72 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/uaccess.h @@ -0,0 +1,1088 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki + */ +#ifndef _ASM_UACCESS_H +#define _ASM_UACCESS_H + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/thread_info.h> + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons, these macros are grossly misnamed. + */ +#define __UA_LIMIT 0x80000000UL + +#define __UA_ADDR ".word" +#define __UA_LA "la" +#define __UA_ADDU "addu" +#define __UA_t0 "$8" +#define __UA_t1 "$9" + + +/* + * USER_DS is a bitmask that has the bits set that may not be set in a valid + * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but + * the arithmetic we're doing only works if the limit is a power of two, so + * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid + * address in this range it's the process's problem, not ours :-) + */ + +#define KERNEL_DS ((mm_segment_t) { 0UL }) +#define USER_DS ((mm_segment_t) { __UA_LIMIT }) + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define segment_eq(a, b) ((a).seg == (b).seg) + + +/* + * Is a address valid? This does a straighforward calculation rather + * than tests. 
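+ * (Worked example with the user mask 0x80000000: addr 0x7ffff000 with
+ * size 0x2000 gives addr + size = 0x80001000, which has bit 31 set, so
+ * the access is rejected; under KERNEL_DS the mask is 0 and every
+ * address passes.)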
+ * + * Address valid if: + * - "addr" doesn't have any high-bits set + * - AND "size" doesn't have any high-bits set + * - AND "addr+size" doesn't have any high-bits set + * - OR we are in kernel mode. + * + * __ua_size() is a trick to avoid runtime checking of positive constant + * sizes; for those we already know at compile time that the size is ok. + */ +#define __ua_size(size) \ + ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size)) + +/* + * access_ok: - Checks if a user space pointer is valid + * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that + * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe + * to write to a block, it is always safe to read from it. + * @addr: User space pointer to start of block to check + * @size: Size of block to check + * + * Context: User context only. This function may sleep. + * + * Checks if a pointer to a block of memory in user space is valid. + * + * Returns true (nonzero) if the memory block may be valid, false (zero) + * if it is definitely invalid. + * + * Note that, depending on architecture, this function probably just + * checks that the pointer is in the user space range - after calling + * this function, memory access functions may still return -EFAULT. + */ + +#define __access_mask get_fs().seg + +#define __access_ok(addr, size, mask) \ +({ \ + unsigned long __addr = (unsigned long) (addr); \ + unsigned long __size = size; \ + unsigned long __mask = mask; \ + unsigned long __ok; \ + \ + __chk_user_ptr(addr); \ + __ok = (signed long)(__mask & (__addr | (__addr + __size) | \ + __ua_size(__size))); \ + __ok == 0; \ +}) + +#define access_ok(type, addr, size) \ + likely(__access_ok((addr), (size), __access_mask)) + +/* + * put_user: - Write a simple value into user space. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Returns zero on success, or -EFAULT on error. + */ +#define put_user(x,ptr) \ + __put_user_check((x), (ptr), sizeof(*(ptr))) + +/* + * get_user: - Get a simple variable from user space. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define get_user(x,ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) + +/* + * __put_user: - Write a simple value into user space, with less checking. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. 
+ * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + */ +#define __put_user(x,ptr) \ + __put_user_nocheck((x), (ptr), sizeof(*(ptr))) + +/* + * __get_user: - Get a simple variable from user space, with less checking. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define __get_user(x,ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr) + +extern void __get_user_unknown(void); + +#define __get_user_common(val, size, ptr) \ +do { \ + switch (size) { \ + case 1: __get_user_asm(val, "lb", ptr); break; \ + case 2: __get_user_asm(val, "lh", ptr); break; \ + case 4: __get_user_asm(val, "lw", ptr); break; \ + case 8: __GET_USER_DW(val, ptr); break; \ + default: __get_user_unknown(); break; \ + } \ +} while (0) + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + int __gu_err; \ + \ + __chk_user_ptr(ptr); \ + __get_user_common((x), size, ptr); \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + int __gu_err = -EFAULT; \ + const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ + \ + might_fault(); \ + if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ + __get_user_common((x), size, __gu_ptr); \ + \ + __gu_err; \ +}) + +#define __get_user_asm(val, insn, addr) \ +{ \ + long __gu_tmp; \ + \ + __asm__ __volatile__( \ + "1: " insn " %1, %3 \n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + "3: li %0, %4 \n" \ + " j 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " "__UA_ADDR "\t1b, 3b \n" \ + " .previous \n" \ + : "=r" (__gu_err), "=r" (__gu_tmp) \ + : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ + \ + (val) = (__typeof__(*(addr))) __gu_tmp; \ +} + +/* + * Get a long long 64 using 32 bit registers. 
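+ * Each of the two lw instructions gets its own __ex_table entry; a fault
+ * on either half branches to the fixup, which returns -EFAULT and zeroes
+ * both result registers.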
+ */ +#define __get_user_asm_ll32(val, addr) \ +{ \ + union { \ + unsigned long long l; \ + __typeof__(*(addr)) t; \ + } __gu_tmp; \ + \ + __asm__ __volatile__( \ + "1: lw %1, (%3) \n" \ + "2: lw %D1, 4(%3) \n" \ + "3: .section .fixup,\"ax\" \n" \ + "4: li %0, %4 \n" \ + " move %1, $0 \n" \ + " move %D1, $0 \n" \ + " j 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 4b \n" \ + " " __UA_ADDR " 2b, 4b \n" \ + " .previous \n" \ + : "=r" (__gu_err), "=&r" (__gu_tmp.l) \ + : "0" (0), "r" (addr), "i" (-EFAULT)); \ + \ + (val) = __gu_tmp.t; \ +} + +#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr) + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + __typeof__(*(ptr)) __pu_val; \ + int __pu_err = 0; \ + \ + __chk_user_ptr(ptr); \ + __pu_val = (x); \ + switch (size) { \ + case 1: __put_user_asm("sb", ptr); break; \ + case 2: __put_user_asm("sh", ptr); break; \ + case 4: __put_user_asm("sw", ptr); break; \ + case 8: __PUT_USER_DW(ptr); break; \ + default: __put_user_unknown(); break; \ + } \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __pu_err = -EFAULT; \ + \ + might_fault(); \ + if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ + switch (size) { \ + case 1: __put_user_asm("sb", __pu_addr); break; \ + case 2: __put_user_asm("sh", __pu_addr); break; \ + case 4: __put_user_asm("sw", __pu_addr); break; \ + case 8: __PUT_USER_DW(__pu_addr); break; \ + default: __put_user_unknown(); break; \ + } \ + } \ + __pu_err; \ +}) + +#define __put_user_asm(insn, ptr) \ +{ \ + __asm__ __volatile__( \ + "1: " insn " %z2, %3 # __put_user_asm\n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + "3: li %0, %4 \n" \ + " j 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 3b \n" \ + " .previous \n" \ + : "=r" (__pu_err) \ + : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \ + "i" (-EFAULT)); \ +} + +#define __put_user_asm_ll32(ptr) \ +{ \ + __asm__ __volatile__( \ + "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ + "2: sw %D2, 4(%3) \n" \ + "3: \n" \ + " .section .fixup,\"ax\" \n" \ + "4: li %0, %4 \n" \ + " j 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 4b \n" \ + " " __UA_ADDR " 2b, 4b \n" \ + " .previous" \ + : "=r" (__pu_err) \ + : "0" (0), "r" (__pu_val), "r" (ptr), \ + "i" (-EFAULT)); \ +} + +extern void __put_user_unknown(void); + +/* + * put_user_unaligned: - Write a simple value into user space. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Returns zero on success, or -EFAULT on error. + */ +#define put_user_unaligned(x,ptr) \ + __put_user_unaligned_check((x),(ptr),sizeof(*(ptr))) + +/* + * get_user_unaligned: - Get a simple variable from user space. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. 
+ * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define get_user_unaligned(x,ptr) \ + __get_user_unaligned_check((x),(ptr),sizeof(*(ptr))) + +/* + * __put_user_unaligned: - Write a simple value into user space, with less checking. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + */ +#define __put_user_unaligned(x,ptr) \ + __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr))) + +/* + * __get_user_unaligned: - Get a simple variable from user space, with less checking. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define __get_user_unaligned(x,ptr) \ + __get_user__unalignednocheck((x),(ptr),sizeof(*(ptr))) + +#define __GET_USER_UNALIGNED_DW(val, ptr) \ + __get_user_unaligned_asm_ll32(val, ptr) + +extern void __get_user_unaligned_unknown(void); + +#define __get_user_unaligned_common(val, size, ptr) \ +do { \ + switch (size) { \ + case 1: __get_user_asm(val, "lb", ptr); break; \ + case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \ + case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \ + case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \ + default: __get_user_unaligned_unknown(); break; \ + } \ +} while (0) + +#define __get_user_unaligned_nocheck(x,ptr,size) \ +({ \ + int __gu_err; \ + \ + __get_user_unaligned_common((x), size, ptr); \ + __gu_err; \ +}) + +#define __get_user_unaligned_check(x,ptr,size) \ +({ \ + int __gu_err = -EFAULT; \ + const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ + \ + if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ + __get_user_unaligned_common((x), size, __gu_ptr); \ + \ + __gu_err; \ +}) + +#define __get_user_unaligned_asm(val, insn, addr) \ +{ \ + long __gu_tmp; \ + \ + __asm__ __volatile__( \ + "1: " insn " %1, %3 \n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + "3: li %0, %4 \n" \ + " j 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " "__UA_ADDR "\t1b, 3b \n" \ + " "__UA_ADDR "\t1b + 4, 3b \n" \ + " .previous \n" \ + : "=r" (__gu_err), "=r" (__gu_tmp) \ + : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ + \ + (val) = (__typeof__(*(addr))) __gu_tmp; \ +} + +/* + * Get a long long 64 using 32 bit registers. 
+ */ +#define __get_user_unaligned_asm_ll32(val, addr) \ +{ \ + unsigned long long __gu_tmp; \ + \ + __asm__ __volatile__( \ + "1: ulw %1, (%3) \n" \ + "2: ulw %D1, 4(%3) \n" \ + " move %0, $0 \n" \ + "3: .section .fixup,\"ax\" \n" \ + "4: li %0, %4 \n" \ + " move %1, $0 \n" \ + " move %D1, $0 \n" \ + " j 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 4b \n" \ + " " __UA_ADDR " 1b + 4, 4b \n" \ + " " __UA_ADDR " 2b, 4b \n" \ + " " __UA_ADDR " 2b + 4, 4b \n" \ + " .previous \n" \ + : "=r" (__gu_err), "=&r" (__gu_tmp) \ + : "0" (0), "r" (addr), "i" (-EFAULT)); \ + (val) = (__typeof__(*(addr))) __gu_tmp; \ +} + +#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr) + +#define __put_user_unaligned_nocheck(x,ptr,size) \ +({ \ + __typeof__(*(ptr)) __pu_val; \ + int __pu_err = 0; \ + \ + __pu_val = (x); \ + switch (size) { \ + case 1: __put_user_asm("sb", ptr); break; \ + case 2: __put_user_unaligned_asm("ush", ptr); break; \ + case 4: __put_user_unaligned_asm("usw", ptr); break; \ + case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \ + default: __put_user_unaligned_unknown(); break; \ + } \ + __pu_err; \ +}) + +#define __put_user_unaligned_check(x,ptr,size) \ +({ \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __pu_err = -EFAULT; \ + \ + if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ + switch (size) { \ + case 1: __put_user_asm("sb", __pu_addr); break; \ + case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \ + case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \ + case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break; \ + default: __put_user_unaligned_unknown(); break; \ + } \ + } \ + __pu_err; \ +}) + +#define __put_user_unaligned_asm(insn, ptr) \ +{ \ + __asm__ __volatile__( \ + "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + "3: li %0, %4 \n" \ + " j 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 3b \n" \ + " .previous \n" \ + : "=r" (__pu_err) \ + : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \ + "i" (-EFAULT)); \ +} + +#define __put_user_unaligned_asm_ll32(ptr) \ +{ \ + __asm__ __volatile__( \ + "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ + "2: sw %D2, 4(%3) \n" \ + "3: \n" \ + " .section .fixup,\"ax\" \n" \ + "4: li %0, %4 \n" \ + " j 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 4b \n" \ + " " __UA_ADDR " 1b + 4, 4b \n" \ + " " __UA_ADDR " 2b, 4b \n" \ + " " __UA_ADDR " 2b + 4, 4b \n" \ + " .previous" \ + : "=r" (__pu_err) \ + : "0" (0), "r" (__pu_val), "r" (ptr), \ + "i" (-EFAULT)); \ +} + +extern void __put_user_unaligned_unknown(void); + +/* + * We're generating jump to subroutines which will be outside the range of + * jump instructions + */ +#ifdef MODULE +#define __MODULE_JAL(destination) \ + ".set\tnoat\n\t" \ + __UA_LA "\t$1, " #destination "\n\t" \ + "jalr\t$1\n\t" \ + ".set\tat\n\t" +#else +#define __MODULE_JAL(destination) \ + "jal\t" #destination "\n\t" +#endif + +#define DADDI_SCRATCH "$0" + +extern size_t __copy_user(void *__to, const void *__from, size_t __n); + +#define __invoke_copy_to_user(to, from, n) \ +({ \ + register void __user *__cu_to_r __asm__("$4"); \ + register const void *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + __MODULE_JAL(__copy_user) \ + : "+r" (__cu_to_r), "+r" 
(__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) + +/* + * __copy_to_user: - Copy a block of data into user space, with less checking. + * @to: Destination address, in user space. + * @from: Source address, in kernel space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from kernel space to user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + */ +#define __copy_to_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + might_fault(); \ + __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ + __cu_len; \ +}) + +extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); + +#define __copy_to_user_inatomic(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ + __cu_len; \ +}) + +#define __copy_from_user_inatomic(to, from, n) \ +({ \ + void *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \ + __cu_len); \ + __cu_len; \ +}) + +/* + * copy_to_user: - Copy a block of data into user space. + * @to: Destination address, in user space. + * @from: Source address, in kernel space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from kernel space to user space. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. 
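+ *
+ * Unlike __copy_to_user() above, this variant performs the access_ok()
+ * check itself and simply returns @n if the destination range is not a
+ * valid user address.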
+ */ +#define copy_to_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ + might_fault(); \ + __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ + __cu_len); \ + } \ + __cu_len; \ +}) + +#define __invoke_copy_from_user(to, from, n) \ +({ \ + register void *__cu_to_r __asm__("$4"); \ + register const void __user *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + ".set\tnoreorder\n\t" \ + __MODULE_JAL(__copy_user) \ + ".set\tnoat\n\t" \ + __UA_ADDU "\t$1, %1, %2\n\t" \ + ".set\tat\n\t" \ + ".set\treorder" \ + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) + +#define __invoke_copy_from_user_inatomic(to, from, n) \ +({ \ + register void *__cu_to_r __asm__("$4"); \ + register const void __user *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + ".set\tnoreorder\n\t" \ + __MODULE_JAL(__copy_user_inatomic) \ + ".set\tnoat\n\t" \ + __UA_ADDU "\t$1, %1, %2\n\t" \ + ".set\tat\n\t" \ + ".set\treorder" \ + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) + +/* + * __copy_from_user: - Copy a block of data from user space, with less checking. + * @to: Destination address, in kernel space. + * @from: Source address, in user space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from user space to kernel space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + * + * If some data could not be copied, this function will pad the copied + * data to the requested size using zero bytes. + */ +#define __copy_from_user(to, from, n) \ +({ \ + void *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + might_fault(); \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ + __cu_len); \ + __cu_len; \ +}) + +/* + * copy_from_user: - Copy a block of data from user space. + * @to: Destination address, in kernel space. + * @from: Source address, in user space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from user space to kernel space. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + * + * If some data could not be copied, this function will pad the copied + * data to the requested size using zero bytes. 
+ */ +#define copy_from_user(to, from, n) \ +({ \ + void *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ + might_fault(); \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ + __cu_len); \ + } \ + __cu_len; \ +}) + +#define __copy_in_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + might_fault(); \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ + __cu_len); \ + __cu_len; \ +}) + +#define copy_in_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \ + access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \ + might_fault(); \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ + __cu_len); \ + } \ + __cu_len; \ +}) + +/* + * __clear_user: - Zero a block of memory in user space, with less checking. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. + * + * Zero a block of memory in user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. + */ +static inline __kernel_size_t +__clear_user(void __user *addr, __kernel_size_t size) +{ + __kernel_size_t res; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, $0\n\t" + "move\t$6, %2\n\t" + __MODULE_JAL(__bzero) + "move\t%0, $6" + : "=r" (res) + : "r" (addr), "r" (size) + : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + + return res; +} + +#define clear_user(addr,n) \ +({ \ + void __user * __cl_addr = (addr); \ + unsigned long __cl_size = (n); \ + if (__cl_size && access_ok(VERIFY_WRITE, \ + __cl_addr, __cl_size)) \ + __cl_size = __clear_user(__cl_addr, __cl_size); \ + __cl_size; \ +}) + +/* + * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * Caller must check the specified block with access_ok() before calling + * this function. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ +static inline long +__strncpy_from_user(char *__to, const char __user *__from, long __len) +{ + long res; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + "move\t$6, %3\n\t" + __MODULE_JAL(__strncpy_from_user_nocheck_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (__to), "r" (__from), "r" (__len) + : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); + + return res; +} + +/* + * strncpy_from_user: - Copy a NUL terminated string from userspace. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. 
+ * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ +static inline long +strncpy_from_user(char *__to, const char __user *__from, long __len) +{ + long res; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + "move\t$6, %3\n\t" + __MODULE_JAL(__strncpy_from_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (__to), "r" (__from), "r" (__len) + : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); + + return res; +} + +/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ +static inline long __strlen_user(const char __user *s) +{ + long res; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + __MODULE_JAL(__strlen_user_nocheck_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s) + : "$2", "$4", __UA_t0, "$31"); + + return res; +} + +/* + * strlen_user: - Get the size of a string in user space. + * @str: The string to measure. + * + * Context: User context only. This function may sleep. + * + * Get the size of a NUL-terminated string in user space. + * + * Returns the size of the string INCLUDING the terminating NUL. + * On exception, returns 0. + * + * If there is a limit on the length of a valid string, you may wish to + * consider using strnlen_user() instead. + */ +static inline long strlen_user(const char __user *s) +{ + long res; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + __MODULE_JAL(__strlen_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s) + : "$2", "$4", __UA_t0, "$31"); + + return res; +} + +/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ +static inline long __strnlen_user(const char __user *s, long n) +{ + long res; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + __MODULE_JAL(__strnlen_user_nocheck_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s), "r" (n) + : "$2", "$4", "$5", __UA_t0, "$31"); + + return res; +} + +/* + * strlen_user: - Get the size of a string in user space. + * @str: The string to measure. + * + * Context: User context only. This function may sleep. + * + * Get the size of a NUL-terminated string in user space. + * + * Returns the size of the string INCLUDING the terminating NUL. + * On exception, returns 0. + * + * If there is a limit on the length of a valid string, you may wish to + * consider using strnlen_user() instead. 
+ */ +static inline long strnlen_user(const char __user *s, long n) +{ + long res; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + __MODULE_JAL(__strnlen_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s), "r" (n) + : "$2", "$4", "$5", __UA_t0, "$31"); + + return res; +} + +struct exception_table_entry +{ + unsigned long insn; + unsigned long nextinsn; +}; + +extern int fixup_exception(struct pt_regs *regs); + +#endif /* _ASM_UACCESS_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/ucontext.h b/target/linux/realtek/files/arch/rlx/include/asm/ucontext.h new file mode 100644 index 000000000..8a4b20e88 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/ucontext.h @@ -0,0 +1,21 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Low level exception handling + * + * Copyright (C) 1998, 1999 by Ralf Baechle + */ +#ifndef _ASM_UCONTEXT_H +#define _ASM_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* _ASM_UCONTEXT_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/unaligned.h b/target/linux/realtek/files/arch/rlx/include/asm/unaligned.h new file mode 100644 index 000000000..d1e422282 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/unaligned.h @@ -0,0 +1,26 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef _ASM_MIPS_UNALIGNED_H +#define _ASM_MIPS_UNALIGNED_H + +#include <linux/compiler.h> +#ifdef CONFIG_CPU_BIG_ENDIAN +# include <linux/unaligned/be_struct.h> +# include <linux/unaligned/le_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#else +# include <linux/unaligned/le_struct.h> +# include <linux/unaligned/be_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#endif + +#endif /* _ASM_MIPS_UNALIGNED_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/unistd.h b/target/linux/realtek/files/arch/rlx/include/asm/unistd.h new file mode 100644 index 000000000..55be21563 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/unistd.h @@ -0,0 +1,409 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * + * Changed system calls macros _syscall5 - _syscall7 to push args 5 to 7 onto + * the stack. Robin Farine for ACN S.A, Copyright (C) 1996 by ACN S.A + */ +#ifndef _ASM_UNISTD_H +#define _ASM_UNISTD_H + +#include <asm/sgidefs.h> + +/* + * Linux o32 style syscalls are in the range from 4000 to 4999. 
+ */ +#define __NR_Linux 4000 +#define __NR_syscall (__NR_Linux + 0) +#define __NR_exit (__NR_Linux + 1) +#define __NR_fork (__NR_Linux + 2) +#define __NR_read (__NR_Linux + 3) +#define __NR_write (__NR_Linux + 4) +#define __NR_open (__NR_Linux + 5) +#define __NR_close (__NR_Linux + 6) +#define __NR_waitpid (__NR_Linux + 7) +#define __NR_creat (__NR_Linux + 8) +#define __NR_link (__NR_Linux + 9) +#define __NR_unlink (__NR_Linux + 10) +#define __NR_execve (__NR_Linux + 11) +#define __NR_chdir (__NR_Linux + 12) +#define __NR_time (__NR_Linux + 13) +#define __NR_mknod (__NR_Linux + 14) +#define __NR_chmod (__NR_Linux + 15) +#define __NR_lchown (__NR_Linux + 16) +#define __NR_break (__NR_Linux + 17) +#define __NR_unused18 (__NR_Linux + 18) +#define __NR_lseek (__NR_Linux + 19) +#define __NR_getpid (__NR_Linux + 20) +#define __NR_mount (__NR_Linux + 21) +#define __NR_umount (__NR_Linux + 22) +#define __NR_setuid (__NR_Linux + 23) +#define __NR_getuid (__NR_Linux + 24) +#define __NR_stime (__NR_Linux + 25) +#define __NR_ptrace (__NR_Linux + 26) +#define __NR_alarm (__NR_Linux + 27) +#define __NR_unused28 (__NR_Linux + 28) +#define __NR_pause (__NR_Linux + 29) +#define __NR_utime (__NR_Linux + 30) +#define __NR_stty (__NR_Linux + 31) +#define __NR_gtty (__NR_Linux + 32) +#define __NR_access (__NR_Linux + 33) +#define __NR_nice (__NR_Linux + 34) +#define __NR_ftime (__NR_Linux + 35) +#define __NR_sync (__NR_Linux + 36) +#define __NR_kill (__NR_Linux + 37) +#define __NR_rename (__NR_Linux + 38) +#define __NR_mkdir (__NR_Linux + 39) +#define __NR_rmdir (__NR_Linux + 40) +#define __NR_dup (__NR_Linux + 41) +#define __NR_pipe (__NR_Linux + 42) +#define __NR_times (__NR_Linux + 43) +#define __NR_prof (__NR_Linux + 44) +#define __NR_brk (__NR_Linux + 45) +#define __NR_setgid (__NR_Linux + 46) +#define __NR_getgid (__NR_Linux + 47) +#define __NR_signal (__NR_Linux + 48) +#define __NR_geteuid (__NR_Linux + 49) +#define __NR_getegid (__NR_Linux + 50) +#define __NR_acct (__NR_Linux + 51) +#define __NR_umount2 (__NR_Linux + 52) +#define __NR_lock (__NR_Linux + 53) +#define __NR_ioctl (__NR_Linux + 54) +#define __NR_fcntl (__NR_Linux + 55) +#define __NR_mpx (__NR_Linux + 56) +#define __NR_setpgid (__NR_Linux + 57) +#define __NR_ulimit (__NR_Linux + 58) +#define __NR_unused59 (__NR_Linux + 59) +#define __NR_umask (__NR_Linux + 60) +#define __NR_chroot (__NR_Linux + 61) +#define __NR_ustat (__NR_Linux + 62) +#define __NR_dup2 (__NR_Linux + 63) +#define __NR_getppid (__NR_Linux + 64) +#define __NR_getpgrp (__NR_Linux + 65) +#define __NR_setsid (__NR_Linux + 66) +#define __NR_sigaction (__NR_Linux + 67) +#define __NR_sgetmask (__NR_Linux + 68) +#define __NR_ssetmask (__NR_Linux + 69) +#define __NR_setreuid (__NR_Linux + 70) +#define __NR_setregid (__NR_Linux + 71) +#define __NR_sigsuspend (__NR_Linux + 72) +#define __NR_sigpending (__NR_Linux + 73) +#define __NR_sethostname (__NR_Linux + 74) +#define __NR_setrlimit (__NR_Linux + 75) +#define __NR_getrlimit (__NR_Linux + 76) +#define __NR_getrusage (__NR_Linux + 77) +#define __NR_gettimeofday (__NR_Linux + 78) +#define __NR_settimeofday (__NR_Linux + 79) +#define __NR_getgroups (__NR_Linux + 80) +#define __NR_setgroups (__NR_Linux + 81) +#define __NR_reserved82 (__NR_Linux + 82) +#define __NR_symlink (__NR_Linux + 83) +#define __NR_unused84 (__NR_Linux + 84) +#define __NR_readlink (__NR_Linux + 85) +#define __NR_uselib (__NR_Linux + 86) +#define __NR_swapon (__NR_Linux + 87) +#define __NR_reboot (__NR_Linux + 88) +#define __NR_readdir (__NR_Linux + 89) +#define 
__NR_mmap (__NR_Linux + 90) +#define __NR_munmap (__NR_Linux + 91) +#define __NR_truncate (__NR_Linux + 92) +#define __NR_ftruncate (__NR_Linux + 93) +#define __NR_fchmod (__NR_Linux + 94) +#define __NR_fchown (__NR_Linux + 95) +#define __NR_getpriority (__NR_Linux + 96) +#define __NR_setpriority (__NR_Linux + 97) +#define __NR_profil (__NR_Linux + 98) +#define __NR_statfs (__NR_Linux + 99) +#define __NR_fstatfs (__NR_Linux + 100) +#define __NR_ioperm (__NR_Linux + 101) +#define __NR_socketcall (__NR_Linux + 102) +#define __NR_syslog (__NR_Linux + 103) +#define __NR_setitimer (__NR_Linux + 104) +#define __NR_getitimer (__NR_Linux + 105) +#define __NR_stat (__NR_Linux + 106) +#define __NR_lstat (__NR_Linux + 107) +#define __NR_fstat (__NR_Linux + 108) +#define __NR_unused109 (__NR_Linux + 109) +#define __NR_iopl (__NR_Linux + 110) +#define __NR_vhangup (__NR_Linux + 111) +#define __NR_idle (__NR_Linux + 112) +#define __NR_vm86 (__NR_Linux + 113) +#define __NR_wait4 (__NR_Linux + 114) +#define __NR_swapoff (__NR_Linux + 115) +#define __NR_sysinfo (__NR_Linux + 116) +#define __NR_ipc (__NR_Linux + 117) +#define __NR_fsync (__NR_Linux + 118) +#define __NR_sigreturn (__NR_Linux + 119) +#define __NR_clone (__NR_Linux + 120) +#define __NR_setdomainname (__NR_Linux + 121) +#define __NR_uname (__NR_Linux + 122) +#define __NR_modify_ldt (__NR_Linux + 123) +#define __NR_adjtimex (__NR_Linux + 124) +#define __NR_mprotect (__NR_Linux + 125) +#define __NR_sigprocmask (__NR_Linux + 126) +#define __NR_create_module (__NR_Linux + 127) +#define __NR_init_module (__NR_Linux + 128) +#define __NR_delete_module (__NR_Linux + 129) +#define __NR_get_kernel_syms (__NR_Linux + 130) +#define __NR_quotactl (__NR_Linux + 131) +#define __NR_getpgid (__NR_Linux + 132) +#define __NR_fchdir (__NR_Linux + 133) +#define __NR_bdflush (__NR_Linux + 134) +#define __NR_sysfs (__NR_Linux + 135) +#define __NR_personality (__NR_Linux + 136) +#define __NR_afs_syscall (__NR_Linux + 137) /* Syscall for Andrew File System */ +#define __NR_setfsuid (__NR_Linux + 138) +#define __NR_setfsgid (__NR_Linux + 139) +#define __NR__llseek (__NR_Linux + 140) +#define __NR_getdents (__NR_Linux + 141) +#define __NR__newselect (__NR_Linux + 142) +#define __NR_flock (__NR_Linux + 143) +#define __NR_msync (__NR_Linux + 144) +#define __NR_readv (__NR_Linux + 145) +#define __NR_writev (__NR_Linux + 146) +#define __NR_cacheflush (__NR_Linux + 147) +#define __NR_cachectl (__NR_Linux + 148) +#define __NR_sysmips (__NR_Linux + 149) +#define __NR_unused150 (__NR_Linux + 150) +#define __NR_getsid (__NR_Linux + 151) +#define __NR_fdatasync (__NR_Linux + 152) +#define __NR__sysctl (__NR_Linux + 153) +#define __NR_mlock (__NR_Linux + 154) +#define __NR_munlock (__NR_Linux + 155) +#define __NR_mlockall (__NR_Linux + 156) +#define __NR_munlockall (__NR_Linux + 157) +#define __NR_sched_setparam (__NR_Linux + 158) +#define __NR_sched_getparam (__NR_Linux + 159) +#define __NR_sched_setscheduler (__NR_Linux + 160) +#define __NR_sched_getscheduler (__NR_Linux + 161) +#define __NR_sched_yield (__NR_Linux + 162) +#define __NR_sched_get_priority_max (__NR_Linux + 163) +#define __NR_sched_get_priority_min (__NR_Linux + 164) +#define __NR_sched_rr_get_interval (__NR_Linux + 165) +#define __NR_nanosleep (__NR_Linux + 166) +#define __NR_mremap (__NR_Linux + 167) +#define __NR_accept (__NR_Linux + 168) +#define __NR_bind (__NR_Linux + 169) +#define __NR_connect (__NR_Linux + 170) +#define __NR_getpeername (__NR_Linux + 171) +#define __NR_getsockname (__NR_Linux + 172) 
+#define __NR_getsockopt (__NR_Linux + 173) +#define __NR_listen (__NR_Linux + 174) +#define __NR_recv (__NR_Linux + 175) +#define __NR_recvfrom (__NR_Linux + 176) +#define __NR_recvmsg (__NR_Linux + 177) +#define __NR_send (__NR_Linux + 178) +#define __NR_sendmsg (__NR_Linux + 179) +#define __NR_sendto (__NR_Linux + 180) +#define __NR_setsockopt (__NR_Linux + 181) +#define __NR_shutdown (__NR_Linux + 182) +#define __NR_socket (__NR_Linux + 183) +#define __NR_socketpair (__NR_Linux + 184) +#define __NR_setresuid (__NR_Linux + 185) +#define __NR_getresuid (__NR_Linux + 186) +#define __NR_query_module (__NR_Linux + 187) +#define __NR_poll (__NR_Linux + 188) +#define __NR_nfsservctl (__NR_Linux + 189) +#define __NR_setresgid (__NR_Linux + 190) +#define __NR_getresgid (__NR_Linux + 191) +#define __NR_prctl (__NR_Linux + 192) +#define __NR_rt_sigreturn (__NR_Linux + 193) +#define __NR_rt_sigaction (__NR_Linux + 194) +#define __NR_rt_sigprocmask (__NR_Linux + 195) +#define __NR_rt_sigpending (__NR_Linux + 196) +#define __NR_rt_sigtimedwait (__NR_Linux + 197) +#define __NR_rt_sigqueueinfo (__NR_Linux + 198) +#define __NR_rt_sigsuspend (__NR_Linux + 199) +#define __NR_pread64 (__NR_Linux + 200) +#define __NR_pwrite64 (__NR_Linux + 201) +#define __NR_chown (__NR_Linux + 202) +#define __NR_getcwd (__NR_Linux + 203) +#define __NR_capget (__NR_Linux + 204) +#define __NR_capset (__NR_Linux + 205) +#define __NR_sigaltstack (__NR_Linux + 206) +#define __NR_sendfile (__NR_Linux + 207) +#define __NR_getpmsg (__NR_Linux + 208) +#define __NR_putpmsg (__NR_Linux + 209) +#define __NR_mmap2 (__NR_Linux + 210) +#define __NR_truncate64 (__NR_Linux + 211) +#define __NR_ftruncate64 (__NR_Linux + 212) +#define __NR_stat64 (__NR_Linux + 213) +#define __NR_lstat64 (__NR_Linux + 214) +#define __NR_fstat64 (__NR_Linux + 215) +#define __NR_pivot_root (__NR_Linux + 216) +#define __NR_mincore (__NR_Linux + 217) +#define __NR_madvise (__NR_Linux + 218) +#define __NR_getdents64 (__NR_Linux + 219) +#define __NR_fcntl64 (__NR_Linux + 220) +#define __NR_reserved221 (__NR_Linux + 221) +#define __NR_gettid (__NR_Linux + 222) +#define __NR_readahead (__NR_Linux + 223) +#define __NR_setxattr (__NR_Linux + 224) +#define __NR_lsetxattr (__NR_Linux + 225) +#define __NR_fsetxattr (__NR_Linux + 226) +#define __NR_getxattr (__NR_Linux + 227) +#define __NR_lgetxattr (__NR_Linux + 228) +#define __NR_fgetxattr (__NR_Linux + 229) +#define __NR_listxattr (__NR_Linux + 230) +#define __NR_llistxattr (__NR_Linux + 231) +#define __NR_flistxattr (__NR_Linux + 232) +#define __NR_removexattr (__NR_Linux + 233) +#define __NR_lremovexattr (__NR_Linux + 234) +#define __NR_fremovexattr (__NR_Linux + 235) +#define __NR_tkill (__NR_Linux + 236) +#define __NR_sendfile64 (__NR_Linux + 237) +#define __NR_futex (__NR_Linux + 238) +#define __NR_sched_setaffinity (__NR_Linux + 239) +#define __NR_sched_getaffinity (__NR_Linux + 240) +#define __NR_io_setup (__NR_Linux + 241) +#define __NR_io_destroy (__NR_Linux + 242) +#define __NR_io_getevents (__NR_Linux + 243) +#define __NR_io_submit (__NR_Linux + 244) +#define __NR_io_cancel (__NR_Linux + 245) +#define __NR_exit_group (__NR_Linux + 246) +#define __NR_lookup_dcookie (__NR_Linux + 247) +#define __NR_epoll_create (__NR_Linux + 248) +#define __NR_epoll_ctl (__NR_Linux + 249) +#define __NR_epoll_wait (__NR_Linux + 250) +#define __NR_remap_file_pages (__NR_Linux + 251) +#define __NR_set_tid_address (__NR_Linux + 252) +#define __NR_restart_syscall (__NR_Linux + 253) +#define __NR_fadvise64 (__NR_Linux + 254) 
+#define __NR_statfs64 (__NR_Linux + 255) +#define __NR_fstatfs64 (__NR_Linux + 256) +#define __NR_timer_create (__NR_Linux + 257) +#define __NR_timer_settime (__NR_Linux + 258) +#define __NR_timer_gettime (__NR_Linux + 259) +#define __NR_timer_getoverrun (__NR_Linux + 260) +#define __NR_timer_delete (__NR_Linux + 261) +#define __NR_clock_settime (__NR_Linux + 262) +#define __NR_clock_gettime (__NR_Linux + 263) +#define __NR_clock_getres (__NR_Linux + 264) +#define __NR_clock_nanosleep (__NR_Linux + 265) +#define __NR_tgkill (__NR_Linux + 266) +#define __NR_utimes (__NR_Linux + 267) +#define __NR_mbind (__NR_Linux + 268) +#define __NR_get_mempolicy (__NR_Linux + 269) +#define __NR_set_mempolicy (__NR_Linux + 270) +#define __NR_mq_open (__NR_Linux + 271) +#define __NR_mq_unlink (__NR_Linux + 272) +#define __NR_mq_timedsend (__NR_Linux + 273) +#define __NR_mq_timedreceive (__NR_Linux + 274) +#define __NR_mq_notify (__NR_Linux + 275) +#define __NR_mq_getsetattr (__NR_Linux + 276) +#define __NR_vserver (__NR_Linux + 277) +#define __NR_waitid (__NR_Linux + 278) +/* #define __NR_sys_setaltroot (__NR_Linux + 279) */ +#define __NR_add_key (__NR_Linux + 280) +#define __NR_request_key (__NR_Linux + 281) +#define __NR_keyctl (__NR_Linux + 282) +#define __NR_set_thread_area (__NR_Linux + 283) +#define __NR_inotify_init (__NR_Linux + 284) +#define __NR_inotify_add_watch (__NR_Linux + 285) +#define __NR_inotify_rm_watch (__NR_Linux + 286) +#define __NR_migrate_pages (__NR_Linux + 287) +#define __NR_openat (__NR_Linux + 288) +#define __NR_mkdirat (__NR_Linux + 289) +#define __NR_mknodat (__NR_Linux + 290) +#define __NR_fchownat (__NR_Linux + 291) +#define __NR_futimesat (__NR_Linux + 292) +#define __NR_fstatat64 (__NR_Linux + 293) +#define __NR_unlinkat (__NR_Linux + 294) +#define __NR_renameat (__NR_Linux + 295) +#define __NR_linkat (__NR_Linux + 296) +#define __NR_symlinkat (__NR_Linux + 297) +#define __NR_readlinkat (__NR_Linux + 298) +#define __NR_fchmodat (__NR_Linux + 299) +#define __NR_faccessat (__NR_Linux + 300) +#define __NR_pselect6 (__NR_Linux + 301) +#define __NR_ppoll (__NR_Linux + 302) +#define __NR_unshare (__NR_Linux + 303) +#define __NR_splice (__NR_Linux + 304) +#define __NR_sync_file_range (__NR_Linux + 305) +#define __NR_tee (__NR_Linux + 306) +#define __NR_vmsplice (__NR_Linux + 307) +#define __NR_move_pages (__NR_Linux + 308) +#define __NR_set_robust_list (__NR_Linux + 309) +#define __NR_get_robust_list (__NR_Linux + 310) +#define __NR_kexec_load (__NR_Linux + 311) +#define __NR_getcpu (__NR_Linux + 312) +#define __NR_epoll_pwait (__NR_Linux + 313) +#define __NR_ioprio_set (__NR_Linux + 314) +#define __NR_ioprio_get (__NR_Linux + 315) +#define __NR_utimensat (__NR_Linux + 316) +#define __NR_signalfd (__NR_Linux + 317) +#define __NR_timerfd (__NR_Linux + 318) +#define __NR_eventfd (__NR_Linux + 319) +#define __NR_fallocate (__NR_Linux + 320) +#define __NR_timerfd_create (__NR_Linux + 321) +#define __NR_timerfd_gettime (__NR_Linux + 322) +#define __NR_timerfd_settime (__NR_Linux + 323) +#define __NR_signalfd4 (__NR_Linux + 324) +#define __NR_eventfd2 (__NR_Linux + 325) +#define __NR_epoll_create1 (__NR_Linux + 326) +#define __NR_dup3 (__NR_Linux + 327) +#define __NR_pipe2 (__NR_Linux + 328) +#define __NR_inotify_init1 (__NR_Linux + 329) +#define __NR_preadv (__NR_Linux + 330) +#define __NR_pwritev (__NR_Linux + 331) +#define __NR_rt_tgsigqueueinfo (__NR_Linux + 332) +#define __NR_perf_counter_open (__NR_Linux + 333) +#define __NR_accept4 (__NR_Linux + 334) + +/* + * Offset of the 
last Linux o32 flavoured syscall + */ +#define __NR_Linux_syscalls 334 + +#define __NR_O32_Linux 4000 +#define __NR_O32_Linux_syscalls 331 + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64 +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_TIME + +/* whitelists for checksyscalls */ +#define __IGNORE_select +#define __IGNORE_vfork +#define __IGNORE_time +#define __IGNORE_uselib +#define __IGNORE_fadvise64_64 +#define __IGNORE_getdents64 + +#endif /* !__ASSEMBLY__ */ + +/* + * "Conditional" syscalls + * + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), + * but it doesn't work on all toolchains, so we just do it by hand + */ +#define cond_syscall(x) asm(".weak\t" #x "\n" #x "\t=\tsys_ni_syscall") + +#endif /* __KERNEL__ */ +#endif /* _ASM_UNISTD_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/user.h b/target/linux/realtek/files/arch/rlx/include/asm/user.h new file mode 100644 index 000000000..afa83a4c1 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/user.h @@ -0,0 +1,58 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle + */ +#ifndef _ASM_USER_H +#define _ASM_USER_H + +#include <asm/page.h> +#include <asm/reg.h> + +/* + * Core file format: The core file is written in such a way that gdb + * can understand it and provide useful information to the user (under + * linux we use the `trad-core' bfd, NOT the irix-core). The file + * contents are as follows: + * + * upage: 1 page consisting of a user struct that tells gdb + * what is present in the file. Directly after this is a + * copy of the task_struct, which is currently not used by gdb, + * but it may come in handy at some point. All of the registers + * are stored as part of the upage. The upage should always be + * only one page long. + * data: The data segment follows next. We use current->end_text to + * current->brk to pick up all of the user variables, plus any memory + * that may have been sbrk'ed. No attempt is made to determine if a + * page is demand-zero or if a page is totally unused, we just cover + * the entire range. All of the addresses are rounded in such a way + * that an integral number of pages is written. + * stack: We need the stack information in order to get a meaningful + * backtrace. We need to write the data from usp to + * current->start_stack, so we round each of these in order to be able + * to write an integer number of pages. 
+ */ +struct user { + unsigned long regs[EF_SIZE / /* integer and fp regs */ + sizeof(unsigned long) + 64]; + size_t u_tsize; /* text size (pages) */ + size_t u_dsize; /* data size (pages) */ + size_t u_ssize; /* stack size (pages) */ + unsigned long start_code; /* text starting address */ + unsigned long start_data; /* data starting address */ + unsigned long start_stack; /* stack starting address */ + long int signal; /* signal causing core dump */ + unsigned long u_ar0; /* help gdb find registers */ + unsigned long magic; /* identifies a core file */ + char u_comm[32]; /* user command name */ +}; + +#define NBPG PAGE_SIZE +#define UPAGES 1 +#define HOST_TEXT_START_ADDR (u.start_code) +#define HOST_DATA_START_ADDR (u.start_data) +#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + +#endif /* _ASM_USER_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/vga.h b/target/linux/realtek/files/arch/rlx/include/asm/vga.h new file mode 100644 index 000000000..f4cff7e4f --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/vga.h @@ -0,0 +1,47 @@ +/* + * Access to VGA videoram + * + * (c) 1998 Martin Mares <mj@ucw.cz> + */ +#ifndef _ASM_VGA_H +#define _ASM_VGA_H + +#include <asm/byteorder.h> + +/* + * On the PC, we can just recalculate addresses and then + * access the videoram directly without any black magic. + */ + +#define VGA_MAP_MEM(x, s) (0xb0000000L + (unsigned long)(x)) + +#define vga_readb(x) (*(x)) +#define vga_writeb(x, y) (*(y) = (x)) + +#define VT_BUF_HAVE_RW +/* + * These are only needed for supporting VGA or MDA text mode, which use little + * endian byte ordering. + * In other cases, we can optimize by using native byte ordering and + * <linux/vt_buffer.h> has already done the right job for us. + */ + +#undef scr_writew +#undef scr_readw + +static inline void scr_writew(u16 val, volatile u16 *addr) +{ + *addr = cpu_to_le16(val); +} + +static inline u16 scr_readw(volatile const u16 *addr) +{ + return le16_to_cpu(*addr); +} + +#define scr_memcpyw(d, s, c) memcpy(d, s, c) +#define scr_memmovew(d, s, c) memmove(d, s, c) +#define VT_BUF_HAVE_MEMCPYW +#define VT_BUF_HAVE_MEMMOVEW + +#endif /* _ASM_VGA_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/wbflush.h b/target/linux/realtek/files/arch/rlx/include/asm/wbflush.h new file mode 100644 index 000000000..6f0df455a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/wbflush.h @@ -0,0 +1,32 @@ +/* + * Header file for using the wbflush routine + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1998 Harald Koerfgen + * Copyright (C) 2002 Maciej W. 
Rozycki + */ +#ifndef _ASM_WBFLUSH_H +#define _ASM_WBFLUSH_H + +#if defined(CONFIG_CPU_HAS_WB) + +extern void (*__wbflush)(void); +extern void wbflush_setup(void); + +#define wbflush() \ + do { \ + __sync(); \ + __wbflush(); \ + } while (0) + +#else /* !CONFIG_CPU_HAS_WB */ + +#define wbflush_setup() do { } while (0) +#define wbflush() fast_iob() + +#endif /* !CONFIG_CPU_HAS_WB */ + +#endif /* _ASM_WBFLUSH_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/xor.h b/target/linux/realtek/files/arch/rlx/include/asm/xor.h new file mode 100644 index 000000000..c82eb12a5 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/xor.h @@ -0,0 +1 @@ +#include <asm-generic/xor.h> diff --git a/target/linux/realtek/files/arch/rlx/include/asm/xtalk/xtalk.h b/target/linux/realtek/files/arch/rlx/include/asm/xtalk/xtalk.h new file mode 100644 index 000000000..79bac882a --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/xtalk/xtalk.h @@ -0,0 +1,52 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * xtalk.h -- platform-independent crosstalk interface, derived from + * IRIX <sys/PCI/bridge.h>, revision 1.38. + * + * Copyright (C) 1995 - 1997, 1999 Silcon Graphics, Inc. + * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) + */ +#ifndef _ASM_XTALK_XTALK_H +#define _ASM_XTALK_XTALK_H + +#ifndef __ASSEMBLY__ +/* + * User-level device driver visible types + */ +typedef char xwidgetnum_t; /* xtalk widget number (0..15) */ + +#define XWIDGET_NONE -1 + +typedef int xwidget_part_num_t; /* xtalk widget part number */ + +#define XWIDGET_PART_NUM_NONE -1 + +typedef int xwidget_rev_num_t; /* xtalk widget revision number */ + +#define XWIDGET_REV_NUM_NONE -1 + +typedef int xwidget_mfg_num_t; /* xtalk widget manufacturing ID */ + +#define XWIDGET_MFG_NUM_NONE -1 + +typedef struct xtalk_piomap_s *xtalk_piomap_t; + +/* It is often convenient to fold the XIO target port + * number into the XIO address. + */ +#define XIO_NOWHERE (0xFFFFFFFFFFFFFFFFull) +#define XIO_ADDR_BITS (0x0000FFFFFFFFFFFFull) +#define XIO_PORT_BITS (0xF000000000000000ull) +#define XIO_PORT_SHIFT (60) + +#define XIO_PACKED(x) (((x)&XIO_PORT_BITS) != 0) +#define XIO_ADDR(x) ((x)&XIO_ADDR_BITS) +#define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT)) +#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS)) + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_XTALK_XTALK_H */ diff --git a/target/linux/realtek/files/arch/rlx/include/asm/xtalk/xwidget.h b/target/linux/realtek/files/arch/rlx/include/asm/xtalk/xwidget.h new file mode 100644 index 000000000..b4a13d740 --- /dev/null +++ b/target/linux/realtek/files/arch/rlx/include/asm/xtalk/xwidget.h @@ -0,0 +1,167 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * xwidget.h - generic crosstalk widget header file, derived from IRIX + * <sys/xtalk/xtalkwidget.h>, revision 1.32. + * + * Copyright (C) 1996, 1999 Silcon Graphics, Inc. 
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) + */ +#ifndef _ASM_XTALK_XWIDGET_H +#define _ASM_XTALK_XWIDGET_H + +#include <linux/types.h> +#include <asm/xtalk/xtalk.h> + +#define WIDGET_ID 0x04 +#define WIDGET_STATUS 0x0c +#define WIDGET_ERR_UPPER_ADDR 0x14 +#define WIDGET_ERR_LOWER_ADDR 0x1c +#define WIDGET_CONTROL 0x24 +#define WIDGET_REQ_TIMEOUT 0x2c +#define WIDGET_INTDEST_UPPER_ADDR 0x34 +#define WIDGET_INTDEST_LOWER_ADDR 0x3c +#define WIDGET_ERR_CMD_WORD 0x44 +#define WIDGET_LLP_CFG 0x4c +#define WIDGET_TFLUSH 0x54 + +/* WIDGET_ID */ +#define WIDGET_REV_NUM 0xf0000000 +#define WIDGET_PART_NUM 0x0ffff000 +#define WIDGET_MFG_NUM 0x00000ffe +#define WIDGET_REV_NUM_SHFT 28 +#define WIDGET_PART_NUM_SHFT 12 +#define WIDGET_MFG_NUM_SHFT 1 + +#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT) +#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT) +#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT) + +/* WIDGET_STATUS */ +#define WIDGET_LLP_REC_CNT 0xff000000 +#define WIDGET_LLP_TX_CNT 0x00ff0000 +#define WIDGET_PENDING 0x0000001f + +/* WIDGET_ERR_UPPER_ADDR */ +#define WIDGET_ERR_UPPER_ADDR_ONLY 0x0000ffff + +/* WIDGET_CONTROL */ +#define WIDGET_F_BAD_PKT 0x00010000 +#define WIDGET_LLP_XBAR_CRD 0x0000f000 +#define WIDGET_LLP_XBAR_CRD_SHFT 12 +#define WIDGET_CLR_RLLP_CNT 0x00000800 +#define WIDGET_CLR_TLLP_CNT 0x00000400 +#define WIDGET_SYS_END 0x00000200 +#define WIDGET_MAX_TRANS 0x000001f0 +#define WIDGET_WIDGET_ID 0x0000000f + +/* WIDGET_INTDEST_UPPER_ADDR */ +#define WIDGET_INT_VECTOR 0xff000000 +#define WIDGET_INT_VECTOR_SHFT 24 +#define WIDGET_TARGET_ID 0x000f0000 +#define WIDGET_TARGET_ID_SHFT 16 +#define WIDGET_UPP_ADDR 0x0000ffff + +/* WIDGET_ERR_CMD_WORD */ +#define WIDGET_DIDN 0xf0000000 +#define WIDGET_SIDN 0x0f000000 +#define WIDGET_PACTYP 0x00f00000 +#define WIDGET_TNUM 0x000f8000 +#define WIDGET_COHERENT 0x00004000 +#define WIDGET_DS 0x00003000 +#define WIDGET_GBR 0x00000800 +#define WIDGET_VBPM 0x00000400 +#define WIDGET_ERROR 0x00000200 +#define WIDGET_BARRIER 0x00000100 + +/* WIDGET_LLP_CFG */ +#define WIDGET_LLP_MAXRETRY 0x03ff0000 +#define WIDGET_LLP_MAXRETRY_SHFT 16 +#define WIDGET_LLP_NULLTIMEOUT 0x0000fc00 +#define WIDGET_LLP_NULLTIMEOUT_SHFT 10 +#define WIDGET_LLP_MAXBURST 0x000003ff +#define WIDGET_LLP_MAXBURST_SHFT 0 + +/* + * according to the crosstalk spec, only 32-bits access to the widget + * configuration registers is allowed. some widgets may allow 64-bits + * access but software should not depend on it. 
registers beyond the + * widget target flush register are widget dependent thus will not be + * defined here + */ +#ifndef __ASSEMBLY__ +typedef u32 widgetreg_t; + +/* widget configuration registers */ +typedef volatile struct widget_cfg { + widgetreg_t w_pad_0; /* 0x00 */ + widgetreg_t w_id; /* 0x04 */ + widgetreg_t w_pad_1; /* 0x08 */ + widgetreg_t w_status; /* 0x0c */ + widgetreg_t w_pad_2; /* 0x10 */ + widgetreg_t w_err_upper_addr; /* 0x14 */ + widgetreg_t w_pad_3; /* 0x18 */ + widgetreg_t w_err_lower_addr; /* 0x1c */ + widgetreg_t w_pad_4; /* 0x20 */ + widgetreg_t w_control; /* 0x24 */ + widgetreg_t w_pad_5; /* 0x28 */ + widgetreg_t w_req_timeout; /* 0x2c */ + widgetreg_t w_pad_6; /* 0x30 */ + widgetreg_t w_intdest_upper_addr; /* 0x34 */ + widgetreg_t w_pad_7; /* 0x38 */ + widgetreg_t w_intdest_lower_addr; /* 0x3c */ + widgetreg_t w_pad_8; /* 0x40 */ + widgetreg_t w_err_cmd_word; /* 0x44 */ + widgetreg_t w_pad_9; /* 0x48 */ + widgetreg_t w_llp_cfg; /* 0x4c */ + widgetreg_t w_pad_10; /* 0x50 */ + widgetreg_t w_tflush; /* 0x54 */ +} widget_cfg_t; + +typedef struct { + unsigned didn:4; + unsigned sidn:4; + unsigned pactyp:4; + unsigned tnum:5; + unsigned ct:1; + unsigned ds:2; + unsigned gbr:1; + unsigned vbpm:1; + unsigned error:1; + unsigned bo:1; + unsigned other:8; +} w_err_cmd_word_f; + +typedef union { + widgetreg_t r; + w_err_cmd_word_f f; +} w_err_cmd_word_u; + +typedef struct xwidget_info_s *xwidget_info_t; + +/* + * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec. + */ +typedef struct xwidget_hwid_s { + xwidget_part_num_t part_num; + xwidget_rev_num_t rev_num; + xwidget_mfg_num_t mfg_num; +} *xwidget_hwid_t; + + +/* + * Returns 1 if a driver that handles devices described by hwid1 is able + * to manage a device with hardwareid hwid2. NOTE: We don't check rev + * numbers at all. + */ +#define XWIDGET_HARDWARE_ID_MATCH(hwid1, hwid2) \ + (((hwid1)->part_num == (hwid2)->part_num) && \ + (((hwid1)->mfg_num == XWIDGET_MFG_NUM_NONE) || \ + ((hwid2)->mfg_num == XWIDGET_MFG_NUM_NONE) || \ + ((hwid1)->mfg_num == (hwid2)->mfg_num))) + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_XTALK_XWIDGET_H */ |
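As a reading aid only, and not part of the patch itself: the short sketch below shows how the xwidget.h definitions above are typically combined. A hypothetical probe helper reads the 32-bit WIDGET_ID register through widget_cfg_t, splits it with the XWIDGET_*_NUM macros, and compares the result against a driver's expected ID using XWIDGET_HARDWARE_ID_MATCH, which, per the comment above, ignores the revision field. The helper names, the register pointer and the driver-side hwid are illustrative assumptions, not identifiers from this patch.

/* Illustrative sketch only; not part of the RLX patch above. */
#include <asm/xtalk/xwidget.h>

/* Decode the WIDGET_ID register into part/rev/mfg numbers. */
static void xwidget_decode_id(widget_cfg_t *cfg, struct xwidget_hwid_s *hwid)
{
	widgetreg_t id = cfg->w_id;	/* single 32-bit read, as the spec note requires */

	hwid->part_num = XWIDGET_PART_NUM(id);
	hwid->rev_num  = XWIDGET_REV_NUM(id);
	hwid->mfg_num  = XWIDGET_MFG_NUM(id);
}

/* Hypothetical driver probe check: nonzero if this driver can manage the widget. */
static int xwidget_driver_matches(const struct xwidget_hwid_s *driver_hwid,
				  widget_cfg_t *cfg)
{
	struct xwidget_hwid_s hw;

	xwidget_decode_id(cfg, &hw);
	/* Revision numbers are deliberately not compared by the macro. */
	return XWIDGET_HARDWARE_ID_MATCH(driver_hwid, &hw);
}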