From 127797e5cf8a036825007586b914b75897aba554 Mon Sep 17 00:00:00 2001
From: Kurt Mahan <kmahan@freescale.com>
Date: Wed, 31 Oct 2007 16:39:31 -0600
Subject: [PATCH] Add ColdFire support to the existing m68k headers.

Modify the various m68k header files to add ColdFire processor
support.

LTIBName: mcfv4e-coldfire-headers
Signed-off-by: Kurt Mahan <kmahan@freescale.com>
---
 include/asm-m68k/atomic.h      |   23 ++-
 include/asm-m68k/bitops.h      |  426 ++++++++++++++++++++++++++++++++++++++++
 include/asm-m68k/bootinfo.h    |   13 ++
 include/asm-m68k/byteorder.h   |   12 +-
 include/asm-m68k/cacheflush.h  |    4 +
 include/asm-m68k/checksum.h    |   10 +
 include/asm-m68k/delay.h       |   26 +++
 include/asm-m68k/div64.h       |    4 +
 include/asm-m68k/elf.h         |    2 +-
 include/asm-m68k/fpu.h         |    2 +
 include/asm-m68k/io.h          |   26 +++-
 include/asm-m68k/irq.h         |    5 +-
 include/asm-m68k/machdep.h     |    7 +
 include/asm-m68k/mmu_context.h |   84 ++++++++-
 include/asm-m68k/page.h        |   20 ++-
 include/asm-m68k/page_offset.h |    7 +-
 include/asm-m68k/pci.h         |   99 ++++++----
 include/asm-m68k/pgalloc.h     |    4 +-
 include/asm-m68k/pgtable.h     |   15 ++
 include/asm-m68k/processor.h   |   46 ++++-
 include/asm-m68k/ptrace.h      |   11 +
 include/asm-m68k/raw_io.h      |   58 ++++++
 include/asm-m68k/segment.h     |   10 +
 include/asm-m68k/setup.h       |   27 +++
 include/asm-m68k/signal.h      |    5 +
 include/asm-m68k/string.h      |    2 +
 include/asm-m68k/system.h      |   17 ++-
 include/asm-m68k/thread_info.h |    1 +
 include/asm-m68k/tlbflush.h    |   16 ++-
 include/asm-m68k/uaccess.h     |    4 +
 30 files changed, 925 insertions(+), 61 deletions(-)

--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -2,7 +2,7 @@
 #define __ARCH_M68K_ATOMIC__
 
 
-#include <asm/system.h>
+#include <asm/system.h>	/* local_irq_XXX() */
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -21,12 +21,20 @@ typedef struct { int counter; } atomic_t
 
 static inline void atomic_add(int i, atomic_t *v)
 {
+#ifndef CONFIG_COLDFIRE
 	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
+#else
+	__asm__ __volatile__("addl %1,%0" : "=m" (*v) : "d" (i), "m" (*v));
+#endif
 }
 
 static inline void atomic_sub(int i, atomic_t *v)
 {
+#ifndef CONFIG_COLDFIRE
 	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
+#else
+	__asm__ __volatile__("subl %1,%0" : "=m" (*v) : "d" (i), "m" (*v));
+#endif
 }
 
 static inline void atomic_inc(atomic_t *v)
@@ -46,6 +54,14 @@ static inline int atomic_dec_and_test(at
 	return c != 0;
 }
 
+static __inline__ int atomic_dec_and_test_lt(volatile atomic_t *v)
+{
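+	/*
+	 * "slt" latches the condition after the subtract, so this
+	 * returns true once the counter has gone negative (result
+	 * less than zero), not merely zero -- hence the _lt suffix.
+	 */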
+	char c;
+	__asm__ __volatile__("subql #1,%1; slt %0" : "=d" (c), "=m" (*v)
+			     : "m" (*v));
+	return c != 0;
+}
+
 static inline int atomic_inc_and_test(atomic_t *v)
 {
 	char c;
@@ -156,7 +172,12 @@ static inline int atomic_sub_and_test(in
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	char c;
+#ifndef CONFIG_COLDFIRE
 	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
+#else
+	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "=m" (*v)
+			     : "d" (i), "m" (*v));
+#endif
 	return c != 0;
 }
 
--- a/include/asm-m68k/bitops.h
+++ b/include/asm-m68k/bitops.h
@@ -19,6 +19,7 @@
  *
  * They use the standard big-endian m680x0 bit ordering.
  */
+#ifndef CONFIG_COLDFIRE
 
 #define test_and_set_bit(nr,vaddr) \
   (__builtin_constant_p(nr) ? \
@@ -457,4 +458,429 @@ static inline int ext2_find_next_bit(con
 
 #endif /* __KERNEL__ */
 
+#else /* CONFIG_COLDFIRE */
+
+#define test_and_set_bit(nr,vaddr) \
+	(__builtin_constant_p(nr) ? \
+	__constant_coldfire_test_and_set_bit(nr, vaddr) : \
+	__generic_coldfire_test_and_set_bit(nr, vaddr))
+
+
+static __inline__ int __constant_coldfire_test_and_set_bit(int nr,
+	volatile void *vaddr)
+{
+	char retval;
+	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "+QUd" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static __inline__ int __generic_coldfire_test_and_set_bit(int nr,
+	volatile void *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "memory");
+	return retval;
+}
+#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
+
+#define set_bit(nr,vaddr) \
+	(__builtin_constant_p(nr) ? \
+	__constant_coldfire_set_bit(nr, vaddr) : \
+	__generic_coldfire_set_bit(nr, vaddr))
+
+static __inline__ void __constant_coldfire_set_bit(int nr,
+	volatile void *vaddr)
+{
+	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+	__asm__ __volatile__ ("bset %1,%0"
+		: "+QUd" (*p) : "di" (nr & 7));
+}
+
+static __inline__ void __generic_coldfire_set_bit(int nr, volatile void *vaddr)
+{
+	__asm__ __volatile__ ("bset %1,%0"
+		: "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "memory");
+}
+#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
+
+#define test_and_clear_bit(nr, vaddr) \
+	(__builtin_constant_p(nr) ? \
+	__constant_coldfire_test_and_clear_bit(nr, vaddr) : \
+	__generic_coldfire_test_and_clear_bit(nr, vaddr))
+
+static __inline__ int __constant_coldfire_test_and_clear_bit(int nr,
+	volatile void *vaddr)
+{
+	char retval;
+	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "+QUd" (*p)
+		: "id" (nr & 7));
+
+	return retval;
+}
+
+static __inline__ int __generic_coldfire_test_and_clear_bit(int nr,
+	volatile void *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+		: "d" (nr & 7)
+		: "memory");
+
+	return retval;
+}
+#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+#define clear_bit(nr,vaddr) \
+	(__builtin_constant_p(nr) ? \
+	__constant_coldfire_clear_bit(nr, vaddr) : \
+	__generic_coldfire_clear_bit(nr, vaddr))
+
+static __inline__ void __constant_coldfire_clear_bit(int nr,
+	volatile void *vaddr)
+{
+	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+	__asm__ __volatile__ ("bclr %1,%0"
+		: "+QUd" (*p) : "id" (nr & 7));
+}
+
+static __inline__ void __generic_coldfire_clear_bit(int nr,
+	volatile void *vaddr)
+{
+	__asm__ __volatile__ ("bclr %1,%0"
+		: "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "memory");
+}
+#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
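+
+/*
+ * In the routines above, "(nr ^ 31) >> 3" selects the byte of the
+ * 32-bit word that holds bit "nr": bit 0 is the least significant
+ * bit of the longword, which on this big-endian CPU lives in the
+ * highest-addressed of the word's four bytes, so the byte index
+ * within the word is reversed.
+ */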
+
+#define test_and_change_bit(nr, vaddr) \
+	(__builtin_constant_p(nr) ? \
+	__constant_coldfire_test_and_change_bit(nr, vaddr) : \
+	__generic_coldfire_test_and_change_bit(nr, vaddr))
+
+static __inline__ int __constant_coldfire_test_and_change_bit(int nr,
+	volatile void *vaddr)
+{
+	char retval;
+	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+
+	__asm__ __volatile__ ("bchg %2,%1; sne %0"
+		: "=d" (retval), "+QUd" (*p)
+		: "id" (nr & 7));
+
+	return retval;
+}
+
+static __inline__ int __generic_coldfire_test_and_change_bit(int nr,
+	volatile void *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bchg %2,%1; sne %0"
+		: "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+		: "id" (nr)
+		: "memory");
+
+	return retval;
+}
+#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
+#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
+
+#define change_bit(nr,vaddr) \
+	(__builtin_constant_p(nr) ? \
+	__constant_coldfire_change_bit(nr, vaddr) : \
+	__generic_coldfire_change_bit(nr, vaddr))
+
+static __inline__ void __constant_coldfire_change_bit(int nr,
+	volatile void *vaddr)
+{
+	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+	__asm__ __volatile__ ("bchg %1,%0"
+		: "+QUd" (*p) : "id" (nr & 7));
+}
+
+static __inline__ void __generic_coldfire_change_bit(int nr,
+	volatile void *vaddr)
+{
+	__asm__ __volatile__ ("bchg %1,%0"
+		: "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "memory");
+}
+
+static inline int test_bit(int nr, const unsigned long *vaddr)
+{
+	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
+
+static __inline__ unsigned long ffz(unsigned long word)
+{
+	unsigned long result = 0;
+
+	while (word & 1) {
+		result++;
+		word >>= 1;
+	}
+	return result;
+}
+
+/* find_next_zero_bit() finds the first zero bit in a bit string of length
+ * 'size' bits, starting the search at bit 'offset'.  This is largely based
+ * on Linus's ALPHA routines.
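+ * The bitmap is scanned one 32-bit longword at a time; ffz() then
+ * locates the first zero bit within the first word that is not all
+ * ones.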
+ */
+static __inline__ unsigned long find_next_zero_bit(void *addr,
+	unsigned long size, unsigned long offset)
+{
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	unsigned long result = offset & ~31UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (32-offset);
+		if (size < 32)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size & ~31UL) {
+		tmp = *(p++);
+		if (~tmp)
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp |= ~0UL >> size;
+found_middle:
+	return result + ffz(tmp);
+}
+
+#define find_first_zero_bit(addr, size) find_next_zero_bit(((void *)addr), \
+						(size), 0)
+
+/* Ported from include/linux/bitops.h */
+static __inline__ int ffs(int x)
+{
+	int r = 1;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff)) {
+		x >>= 16;
+		r += 16;
+	}
+	if (!(x & 0xff)) {
+		x >>= 8;
+		r += 8;
+	}
+	if (!(x & 0xf)) {
+		x >>= 4;
+		r += 4;
+	}
+	if (!(x & 3)) {
+		x >>= 2;
+		r += 2;
+	}
+	if (!(x & 1)) {
+		x >>= 1;
+		r += 1;
+	}
+	return r;
+}
+#define __ffs(x) (ffs(x) - 1)
+
+/* find_next_bit - find the next set bit in a memory region
+ * (from asm-ppc/bitops.h)
+ */
+static __inline__ unsigned long find_next_bit(const unsigned long *addr,
+	unsigned long size, unsigned long offset)
+{
+	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
+	unsigned int result = offset & ~31UL;
+	unsigned int tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) {
+		tmp = *p++;
+		tmp &= ~0UL << offset;
+		if (size < 32)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size >= 32) {
+		tmp = *p++;
+		if (tmp != 0)
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= ~0UL >> (32 - size);
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
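+
+/*
+ * Example: with bits 5 and 9 set, find_first_bit() returns 5 and
+ * find_next_bit(addr, size, 6) returns 9; both return "size" when
+ * no (further) set bit exists.
+ */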
+
+#ifdef __KERNEL__
+
+/* Ported from include/linux/bitops.h */
+static __inline__ int fls(int x)
+{
+	int r = 32;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff0000u)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xff000000u)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xf0000000u)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xc0000000u)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x80000000u)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+
+#define minix_find_first_zero_bit(addr, size)	find_next_zero_bit((addr), \
+						(size), 0)
+#define minix_test_and_set_bit(nr, addr)	test_and_set_bit((nr), \
+						(unsigned long *)(addr))
+#define minix_set_bit(nr, addr)			set_bit((nr), \
+						(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr, addr)	test_and_clear_bit((nr), \
+						(unsigned long *)(addr))
+
+static inline int minix_test_bit(int nr, const volatile unsigned long *vaddr)
+{
+	int *a = (int *)vaddr;
+	int mask;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	return ((mask & *a) != 0);
+}
+
+#define ext2_set_bit(nr, addr)			test_and_set_bit((nr) ^ 24, \
+						(unsigned long *)(addr))
+#define ext2_set_bit_atomic(lock, nr, addr)	test_and_set_bit((nr) ^ 24, \
+						(unsigned long *)(addr))
+#define ext2_clear_bit(nr, addr)		test_and_clear_bit((nr) ^ 24, \
+						(unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr)	test_and_clear_bit((nr) ^ 24, \
+						(unsigned long *)(addr))
+
+static inline int ext2_test_bit(int nr, const void *vaddr)
+{
+	const unsigned char *p = vaddr;
+	return (p[nr >> 3] & (1U << (nr & 7))) != 0;
+}
+
+static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
+{
+	const unsigned long *p = vaddr, *addr = vaddr;
+	int res;
+
+	if (!size)
+		return 0;
+
+	size = (size >> 5) + ((size & 31) > 0);
+	while (*p++ == ~0UL) {
+		if (--size == 0)
+			return (p - addr) << 5;
+	}
+
+	--p;
+	for (res = 0; res < 32; res++)
+		if (!ext2_test_bit(res, p))
+			break;
+	return (p - addr) * 32 + res;
+}
+
+static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
+					  unsigned offset)
+{
+	const unsigned long *addr = vaddr;
+	const unsigned long *p = addr + (offset >> 5);
+	int bit = offset & 31UL, res;
+
+	if (offset >= size)
+		return size;
+
+	if (bit) {
+		/* Look for zero in first longword */
+		for (res = bit; res < 32; res++)
+			if (!ext2_test_bit(res, p))
+				return (p - addr) * 32 + res;
+		p++;
+	}
+	/* No zero yet, search remaining full bytes for a zero */
+	res = ext2_find_first_zero_bit(p, size - 32 * (p - addr));
+	return (p - addr) * 32 + res;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* CONFIG_COLDFIRE */
+
 #endif /* _M68K_BITOPS_H */
--- a/include/asm-m68k/bootinfo.h
+++ b/include/asm-m68k/bootinfo.h
@@ -49,6 +49,19 @@ struct bi_record {
 
 #endif /* __ASSEMBLY__ */
 
+#ifndef __ASSEMBLY__
+
+struct uboot_record {
+	unsigned long bd_info;
+	unsigned long initrd_start;
+	unsigned long initrd_end;
+	unsigned long cmd_line_start;
+	unsigned long cmd_line_stop;
+};
+
+#endif /* __ASSEMBLY__ */
+
+
 /*
  * Tag Definitions
  *
--- a/include/asm-m68k/byteorder.h
+++ b/include/asm-m68k/byteorder.h
@@ -4,8 +4,15 @@
 #include <asm/types.h>
 #include <linux/compiler.h>
 
-#ifdef __GNUC__
-
+#if defined(__GNUC__)
+#if defined(__mcfisaaplus__) || defined(__mcfisac__)
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 val)
+{
+	__asm__ ("byterev %0" : "=d" (val) : "0" (val));
+	return val;
+}
+#define __arch__swab32(x) ___arch__swab32(x)
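+/*
+ * ISA_A+ and ISA_C ColdFire cores have the single-instruction
+ * "byterev"; other ColdFire cores have neither it nor the classic
+ * rolw/swap sequence below, so they fall back to the generic C swab.
+ */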
+#elif !defined(__mcoldfire__)
 static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 val)
 {
 	__asm__("rolw #8,%0; swap %0; rolw #8,%0" : "=d" (val) : "0" (val));
@@ -14,6 +21,7 @@ static __inline__ __attribute_const__ __
 #define __arch__swab32(x) ___arch__swab32(x)
 
 #endif
+#endif
 
 #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
 #  define __BYTEORDER_HAS_U64__
--- a/include/asm-m68k/cacheflush.h
+++ b/include/asm-m68k/cacheflush.h
@@ -6,6 +6,9 @@
 /* cache code */
 #define FLUSH_I_AND_D	(0x00000808)
 #define FLUSH_I		(0x00000008)
+#ifdef CONFIG_COLDFIRE
+#include <asm/cf_cacheflush.h>
+#else /* !CONFIG_COLDFIRE */
 
 /*
  * Cache handling functions
@@ -153,4 +156,5 @@ static inline void copy_from_user_page(s
 	memcpy(dst, src, len);
 }
 
+#endif /* !CONFIG_COLDFIRE */
 #endif /* _M68K_CACHEFLUSH_H */
--- a/include/asm-m68k/checksum.h
+++ b/include/asm-m68k/checksum.h
@@ -34,6 +34,7 @@ extern __wsum csum_partial_copy_nocheck(
 					void *dst, int len, __wsum sum);
 
 
+#ifndef CONFIG_COLDFIRE	/* CF has its own copy in arch/m68k/lib/checksum.c */
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
@@ -59,6 +60,9 @@ static inline __sum16 ip_fast_csum(const
 		 : "memory");
 	return (__force __sum16)~sum;
 }
+#else
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
 
 /*
  *	Fold a partial checksum
@@ -67,6 +71,11 @@ static inline __sum16 ip_fast_csum(const
 static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp = (__force u32)sum;
+#ifdef CONFIG_COLDFIRE
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	return (__force __sum16)~tmp;
+#else
 	__asm__("swap %1\n\t"
 		"addw %1, %0\n\t"
 		"clrw %1\n\t"
@@ -74,6 +83,7 @@ static inline __sum16 csum_fold(__wsum s
 		: "=&d" (sum), "=&d" (tmp)
 		: "0" (sum), "1" (tmp));
 	return (__force __sum16)~sum;
+#endif
 }
 
--- a/include/asm-m68k/delay.h
+++ b/include/asm-m68k/delay.h
@@ -11,8 +11,25 @@
 
 static inline void __delay(unsigned long loops)
 {
+#if defined(CONFIG_COLDFIRE)
+	/* The ColdFire runs this loop at significantly different speeds
+	 * depending upon long word alignment or not.  We'll pad it to
+	 * long word alignment which is the faster version.
+	 * The 0x4a8e is of course a 'tstl %fp' instruction.  This is better
+	 * than using a NOP (0x4e71) instruction because it executes in one
+	 * cycle not three and doesn't allow for an arbitrary delay waiting
+	 * for bus cycles to finish.  Also fp/a6 isn't likely to cause a
+	 * stall waiting for the register to become valid if such is added
+	 * to the ColdFire at some stage.
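+	 *
+	 * With the loop long-word aligned, each iteration is just the
+	 * two-instruction subql/jcc pair below.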
+	 */
+	__asm__ __volatile__ (".balignw 4, 0x4a8e\n\t"
+			      "1: subql #1, %0\n\t"
+			      "jcc 1b"
+			      : "=d" (loops) : "0" (loops));
+#else
 	__asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
 		: "=d" (loops) : "0" (loops));
+#endif
 }
 
 extern void __bad_udelay(void);
@@ -26,12 +43,17 @@ extern void __bad_udelay(void);
  */
 static inline void __const_udelay(unsigned long xloops)
 {
+#if defined(CONFIG_COLDFIRE)
+
+	__delay(((((unsigned long long) xloops * loops_per_jiffy)) >> 32) * HZ);
+#else
 	unsigned long tmp;
 
 	__asm__ ("mulul %2,%0:%1"
 		: "=d" (xloops), "=d" (tmp)
 		: "d" (xloops), "1" (loops_per_jiffy));
 	__delay(xloops * HZ);
+#endif
 }
 
 static inline void __udelay(unsigned long usecs)
@@ -46,12 +68,16 @@ static inline void __udelay(unsigned lon
 static inline unsigned long muldiv(unsigned long a, unsigned long b,
 				   unsigned long c)
 {
+#if defined(CONFIG_COLDFIRE)
+	return (long)(((unsigned long long)a * b) / c);
+#else
 	unsigned long tmp;
 
 	__asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
 		: "=d" (tmp), "=d" (a)
 		: "d" (b), "d" (c), "1" (a));
 	return a;
+#endif
 }
 
 #endif /* defined(_M68K_DELAY_H) */
--- a/include/asm-m68k/div64.h
+++ b/include/asm-m68k/div64.h
@@ -5,6 +5,7 @@
 
 /* n = n / base; return rem; */
 
+#ifndef CONFIG_COLDFIRE
 #define do_div(n, base) ({					\
 	union {							\
 		unsigned long n32[2];				\
@@ -24,6 +25,9 @@
 	(n) = __n.n64;						\
 	__rem;							\
 })
+#else
+# include <asm-generic/div64.h>
+#endif
 
 extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif /* _M68K_DIV64_H */
--- a/include/asm-m68k/elf.h
+++ b/include/asm-m68k/elf.h
@@ -60,7 +60,7 @@ typedef struct user_m68kfp_struct elf_fp
 #define ELF_PLAT_INIT(_r, load_addr)	_r->a1 = 0
 
 #define USE_ELF_CORE_DUMP
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 #define ELF_EXEC_PAGESIZE	4096
 #else
 #define ELF_EXEC_PAGESIZE	8192
--- a/include/asm-m68k/fpu.h
+++ b/include/asm-m68k/fpu.h
@@ -12,6 +12,8 @@
 #define FPSTATESIZE (96/sizeof(unsigned char))
 #elif defined(CONFIG_M68KFPU_EMU)
 #define FPSTATESIZE (28/sizeof(unsigned char))
+#elif defined(CONFIG_CFV4E)
+#define FPSTATESIZE (16/sizeof(unsigned char))
 #elif defined(CONFIG_M68060)
 #define FPSTATESIZE (12/sizeof(unsigned char))
 #else
--- a/include/asm-m68k/io.h
+++ b/include/asm-m68k/io.h
@@ -397,10 +397,12 @@ static inline void memcpy_toio(volatile
 	__builtin_memcpy((void __force *) dst, src, count);
 }
 
-#ifndef CONFIG_SUN3
-#define IO_SPACE_LIMIT 0xffff
-#else
+#if defined(CONFIG_SUN3)
 #define IO_SPACE_LIMIT 0x0fffffff
+#elif defined(CONFIG_COLDFIRE)
+#define IO_SPACE_LIMIT 0xffffffff
+#else
+#define IO_SPACE_LIMIT 0xffff
 #endif
 
 #endif /* __KERNEL__ */
@@ -418,4 +420,22 @@ static inline void memcpy_toio(volatile
 */
 #define xlate_dev_kmem_ptr(p)	p
 
+#ifdef CONFIG_COLDFIRE
+
+#define memset_io(a, b, c)	memset((void *)(a), (b), (c))
+#define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
+#if !defined(readb)
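+/*
+ * ColdFire I/O is memory mapped, so the basic accessors below are
+ * plain volatile dereferences of the given address.
+ */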
+#define readb(addr) \
+	({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
+#define readw(addr) \
+	({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
+#define readl(addr) \
+	({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
+#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
+#endif /* readb */
+#endif /* CONFIG_COLDFIRE */
+
 #endif /* _IO_H */
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -11,7 +11,10 @@
  * Currently the Atari has 72 and the Amiga 24, but if both are
  * supported in the kernel it is better to make room for 72.
 */
-#if defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
+#if defined(CONFIG_COLDFIRE)
+#define SYS_IRQS 256
+#define NR_IRQS SYS_IRQS
+#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
 #define NR_IRQS 200
 #elif defined(CONFIG_ATARI) || defined(CONFIG_MAC)
 #define NR_IRQS 72
--- a/include/asm-m68k/machdep.h
+++ b/include/asm-m68k/machdep.h
@@ -32,4 +32,11 @@ extern void (*mach_heartbeat) (int);
 extern void (*mach_l2_flush) (int);
 extern void (*mach_beep) (unsigned int, unsigned int);
 
+#ifdef CONFIG_COLDFIRE
+extern void __init config_coldfire(void);
+extern void __init mmu_context_init(void);
+extern irq_handler_t mach_default_handler;
+extern void (*mach_tick)(void);
+#endif
+
 #endif /* _M68K_MACHDEP_H */
--- a/include/asm-m68k/mmu_context.h
+++ b/include/asm-m68k/mmu_context.h
@@ -7,7 +7,7 @@ static inline void enter_lazy_tlb(struct
 {
 }
 
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -102,7 +102,7 @@ static inline void activate_mm(struct mm
 	switch_mm_0460(next_mm);
 }
 
-#else  /* CONFIG_SUN3 */
+#elif defined(CONFIG_SUN3)
 #include <asm/sun3mmu.h>
 #include <linux/sched.h>
 
@@ -150,5 +150,83 @@ static inline void activate_mm(struct mm
 	activate_context(next_mm);
 }
 
-#endif
+#else /* CONFIG_COLDFIRE */
+
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/mmu.h>
+
+#define NO_CONTEXT		256
+#define LAST_CONTEXT		255
+#define FIRST_CONTEXT		1
+
+extern void set_context(mm_context_t context, pgd_t *pgd);
+extern unsigned long context_map[];
+extern mm_context_t next_mmu_context;
+
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+
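+/*
+ * Allocate a hardware context (ASID) for this mm the first time it
+ * is needed.  When no context is free, steal_context() evicts one;
+ * the context_map bitmap search then claims the next free number
+ * starting from next_mmu_context.
+ */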
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	mm_context_t ctx;
+
+	if (mm->context != NO_CONTEXT)
+		return;
+	while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+		atomic_inc(&nr_free_contexts);
+		steal_context();
+	}
+	ctx = next_mmu_context;
+	while (test_and_set_bit(ctx, context_map)) {
+		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+		if (ctx > LAST_CONTEXT)
+			ctx = 0;
+	}
+	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+	mm->context = ctx;
+	context_mm[ctx] = mm;
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+#define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	if (mm->context != NO_CONTEXT) {
+		clear_bit(mm->context, context_map);
+		mm->context = NO_CONTEXT;
+		atomic_inc(&nr_free_contexts);
+	}
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	get_mmu_context(tsk->mm);
+	set_context(tsk->mm->context, next->pgd);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *active_mm,
+			       struct mm_struct *mm)
+{
+	get_mmu_context(mm);
+	set_context(mm->context, mm->pgd);
+}
+
+#define deactivate_mm(tsk, mm)	do { } while (0)
+
+extern void mmu_context_init(void);
+
+#endif /* CONFIG_COLDFIRE */
 #endif
--- a/include/asm-m68k/page.h
+++ b/include/asm-m68k/page.h
@@ -4,7 +4,7 @@
 #include <linux/const.h>
 
 /* PAGE_SHIFT determines the page size */
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 #define PAGE_SHIFT	(12)
 #else
 #define PAGE_SHIFT	(13)
@@ -116,10 +116,23 @@ typedef struct page *pgtable_t;
 
 extern unsigned long m68k_memoffset;
 
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3)
 
 #define WANT_PAGE_VIRTUAL
 
+#if defined(CONFIG_COLDFIRE)
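+/*
+ * The ColdFire kernel is linked at PAGE_OFFSET (0xC0000000) with RAM
+ * physically at CONFIG_SDRAM_BASE, so the virt/phys conversion is a
+ * fixed offset applied to the low 28 bits of the address.
+ */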
+static inline unsigned long ___pa(void *vaddr)
+{
+	return (((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE);
+}
+#define __pa(vaddr)	___pa((void *)(vaddr))
+
+static inline void *__va(unsigned long paddr)
+{
+	return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET);
+}
+
+#else
 static inline unsigned long ___pa(void *vaddr)
 {
 	unsigned long paddr;
@@ -141,6 +154,7 @@ static inline void *__va(unsigned long p
 		: "0" (paddr), "i" (m68k_fixup_memoffset));
 	return vaddr;
 }
+#endif
 
 #else	/* !CONFIG_SUN3 */
 /* This #define is a horrible hack to suppress lots of warnings. --m */
@@ -172,6 +186,8 @@ static inline void *__va(unsigned long x
  * memory node, but we have no highmem, so that works for now.
  * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
  * of the shifts unnecessary.
+ *
+ * PFNs index physical pages, so PFN 0 maps the base physical address.
 */
 #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
--- a/include/asm-m68k/page_offset.h
+++ b/include/asm-m68k/page_offset.h
@@ -1,8 +1,11 @@
 /* This handles the memory map.. */
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 #define PAGE_OFFSET_RAW		0x00000000
-#else
+#elif defined(CONFIG_SUN3)
 #define PAGE_OFFSET_RAW		0x0E000000
+#else /* CONFIG_COLDFIRE */
+#define PAGE_OFFSET_RAW		0xC0000000
+#define PHYS_OFFSET		0x40000000
 #endif
--- a/include/asm-m68k/pci.h
+++ b/include/asm-m68k/pci.h
@@ -1,57 +1,86 @@
-#ifndef _ASM_M68K_PCI_H
-#define _ASM_M68K_PCI_H
-
 /*
- * asm-m68k/pci_m68k.h - m68k specific PCI declarations.
+ * asm-m68k/pci.h - m68k specific PCI declarations.
  *
- * Written by Wout Klaren.
+ * Coldfire Implementation Copyright (c) 2007 Freescale Semiconductor, Inc.
+ *	Kurt Mahan <kmahan@freescale.com>
 */
+#ifndef _ASM_M68K_PCI_H
+#define _ASM_M68K_PCI_H
 
-#include <asm/scatterlist.h>
+#ifdef CONFIG_PCI
 
-struct pci_ops;
+#include <asm/scatterlist.h>
 
 /*
- * Structure with hardware dependent information and functions of the
- * PCI bus.
+ * The PCI address space does equal the physical memory
+ * address space.  The networking and block device layers use
+ * this boolean for bounce buffer decisions.
 */
+#define PCI_DMA_BUS_IS_PHYS	(1)
 
-struct pci_bus_info
-{
-	/*
-	 * Resources of the PCI bus.
-	 */
-
-	struct resource mem_space;
-	struct resource io_space;
+#define PCIBIOS_MIN_IO		0x00004000
+#define PCIBIOS_MIN_MEM		0x02000000
 
-	/*
-	 * System dependent functions.
-	 */
-
-	struct pci_ops *m68k_pci_ops;
-
-	void (*fixup)(int pci_modify);
-	void (*conf_device)(struct pci_dev *dev);
-};
+#define pcibios_assign_all_busses()	0
+#define pcibios_scan_all_fns(a, b)	0
 
-#define pcibios_assign_all_busses()	0
-#define pcibios_scan_all_fns(a, b)	0
+static inline void
+pcibios_set_master(struct pci_dev *dev)
+{
+	/* no special bus mastering setup handling */
+}
 
-static inline void pcibios_set_master(struct pci_dev *dev)
+static inline void
+pcibios_penalize_isa_irq(int irq, int active)
 {
-	/* No special bus mastering setup handling */
+	/* no dynamic PCI IRQ allocation */
 }
 
-static inline void pcibios_penalize_isa_irq(int irq, int active)
+static inline void
+pcibios_add_platform_entries(struct pci_dev *dev)
 {
-	/* We don't do dynamic PCI IRQ allocation */
+	/* no special handling */
 }
 
-/* The PCI address space does equal the physical memory
- * address space.  The networking and block device layers use
- * this boolean for bounce buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS	(1)
+static inline void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+			struct resource *res)
+{
+#ifdef CONFIG_M54455
+	if ((res->start == 0xa0000000) || (res->start == 0xa8000000)) {
+		/* HACK! FIX! kludge to fix bridge mapping */
+		region->start = res->start & 0x0fffffff;
+		region->end = res->end & 0x0fffffff;
+	} else {
+		region->start = res->start;
+		region->end = res->end;
+	}
+#else
+	region->start = res->start;
+	region->end = res->end;
+#endif
+}
 
+static inline void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region)
+{
+	res->start = region->start;
+	res->end = region->end;
+}
+
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+	struct resource *root = NULL;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &ioport_resource;
+	if (res->flags & IORESOURCE_MEM)
+		root = &iomem_resource;
+
+	return root;
+}
+
+#endif /* CONFIG_PCI */
 #endif /* _ASM_M68K_PCI_H */
--- a/include/asm-m68k/pgalloc.h
+++ b/include/asm-m68k/pgalloc.h
@@ -8,8 +8,10 @@
 
 #include <asm/virtconvert.h>
 
-#ifdef CONFIG_SUN3
+#if defined(CONFIG_SUN3)
 #include <asm/sun3_pgalloc.h>
+#elif defined(CONFIG_COLDFIRE)
+#include <asm/cf_pgalloc.h>
 #else
 #include <asm/motorola_pgalloc.h>
 #endif
--- a/include/asm-m68k/pgtable.h
+++ b/include/asm-m68k/pgtable.h
@@ -40,6 +40,8 @@
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 #ifdef CONFIG_SUN3
 #define PGDIR_SHIFT 17
+#elif defined(CONFIG_COLDFIRE)
+#define PGDIR_SHIFT	22
 #else
 #define PGDIR_SHIFT	25
 #endif
@@ -54,6 +56,10 @@
 #define PTRS_PER_PTE	16
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	2048
+#elif defined(CONFIG_COLDFIRE)
+#define PTRS_PER_PTE	512
+#define PTRS_PER_PMD	1
+#define PTRS_PER_PGD	1024
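+/*
+ * ColdFire uses 8K pages (PAGE_SHIFT 13): each last-level table holds
+ * 512 entries and each of the 1024 pgd slots maps 4MB (1 << PGDIR_SHIFT).
+ */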
 #else
 #define PTRS_PER_PTE	1024
 #define PTRS_PER_PMD	8
@@ -66,6 +72,9 @@
 #ifdef CONFIG_SUN3
 #define KMAP_START	0x0DC00000
 #define KMAP_END	0x0E000000
+#elif defined(CONFIG_COLDFIRE)
+#define KMAP_START	0xe0000000
+#define KMAP_END	0xf0000000
 #else
 #define KMAP_START	0xd0000000
 #define KMAP_END	0xf0000000
@@ -130,6 +139,8 @@ static inline void update_mmu_cache(stru
 
 #ifdef CONFIG_SUN3
 #include <asm/sun3_pgtable.h>
+#elif defined(CONFIG_COLDFIRE)
+#include <asm/cf_pgtable.h>
 #else
 #include <asm/motorola_pgtable.h>
 #endif
@@ -140,6 +151,9 @@ static inline void update_mmu_cache(stru
 /*
  * Macro to mark a page protection value as "uncacheable".
 */
+#ifdef CONFIG_COLDFIRE
+# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
+#else /* CONFIG_COLDFIRE */
 #ifdef SUN3_PAGE_NOCACHE
 # define __SUN3_PAGE_NOCACHE	SUN3_PAGE_NOCACHE
 #else
@@ -154,6 +168,7 @@ static inline void update_mmu_cache(stru
 	 ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))	\
 	 : (prot)))
 
+#endif /* CONFIG_COLDFIRE */
 #endif /* !__ASSEMBLY__ */
 
 /*
--- a/include/asm-m68k/processor.h
+++ b/include/asm-m68k/processor.h
@@ -22,24 +22,38 @@ static inline unsigned long rdusp(void)
 {
 	unsigned long usp;
 
+#ifndef CONFIG_COLDFIRE
 	__asm__ __volatile__("move %/usp,%0" : "=a" (usp));
+#else
+	__asm__ __volatile__("movel %/usp,%0" : "=a" (usp));
+#endif
 	return usp;
 }
 
 static inline void wrusp(unsigned long usp)
 {
+#ifndef CONFIG_COLDFIRE
 	__asm__ __volatile__("move %0,%/usp" : : "a" (usp));
+#else
+	__asm__ __volatile__("movel %0,%/usp" : : "a" (usp));
+#endif
 }
 
 /*
 * User space process size: 3.75GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 #define TASK_SIZE	(0xF0000000UL)
+#elif defined(CONFIG_COLDFIRE)
+#define TASK_SIZE	(0xC0000000UL)
+#else /* CONFIG_SUN3 */
+#ifdef __ASSEMBLY__
+#define TASK_SIZE	(0x0E000000)
 #else
 #define TASK_SIZE	(0x0E000000UL)
 #endif
+#endif
 
 #ifdef __KERNEL__
 #define STACK_TOP	TASK_SIZE
@@ -49,9 +63,11 @@ static inline void wrusp(unsigned long u
 /* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
-#ifndef CONFIG_SUN3
-#define TASK_UNMAPPED_BASE	0xC0000000UL
-#else
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+#define TASK_UNMAPPED_BASE	0xC0000000UL
+#elif defined(CONFIG_COLDFIRE)
+#define TASK_UNMAPPED_BASE	0x80000000UL
+#else /* CONFIG_SUN3 */
 #define TASK_UNMAPPED_BASE	0x0A000000UL
 #endif
 #define TASK_UNMAPPED_ALIGN(addr, off)	PAGE_ALIGN(addr)
@@ -60,7 +76,11 @@ struct thread_struct {
 	unsigned long  ksp;		/* kernel stack pointer */
 	unsigned long  usp;		/* user stack pointer */
 	unsigned short sr;		/* saved status register */
+#ifndef CONFIG_COLDFIRE
 	unsigned short fs;		/* saved fs (sfc, dfc) */
+#else
+	mm_segment_t   fs;
+#endif
 	unsigned long  crp[2];		/* cpu root pointer */
 	unsigned long  esp0;		/* points to SR of stack frame */
 	unsigned long  faddr;		/* info about last fault */
@@ -81,6 +101,7 @@ struct thread_struct {
 /*
 * Do necessary setup to start up a newly executed thread.
 */
+#ifndef CONFIG_COLDFIRE
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 				unsigned long usp)
 {
@@ -91,6 +112,23 @@ static inline void start_thread(struct p
 	regs->sr &= ~0x2000;
 	wrusp(usp);
 }
+#else
+/*
+ * Do necessary setup to start up a newly executed thread.
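+ * The ColdFire variant also switches the address space back to
+ * USER_DS with set_fs() before loading the new pc and usp.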
+ *
+ * Pass the data segment into user programs if it exists;
+ * it can't hurt anything as far as I can tell.
+ */
+#define start_thread(_regs, _pc, _usp)			\
+do {							\
+	set_fs(USER_DS); /* reads from user space */	\
+	(_regs)->pc = (_pc);				\
+	if (current->mm)				\
+		(_regs)->d5 = current->mm->start_data;	\
+	(_regs)->sr &= ~0x2000;				\
+	wrusp(_usp);					\
+} while (0)
+#endif
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
--- a/include/asm-m68k/ptrace.h
+++ b/include/asm-m68k/ptrace.h
@@ -38,10 +38,21 @@ struct pt_regs {
 	long     d0;
 	long     orig_d0;
 	long     stkadj;
+#ifndef CONFIG_COLDFIRE
 	unsigned short sr;
 	unsigned long  pc;
 	unsigned format :  4; /* frame format specifier */
 	unsigned vector : 12; /* vector offset */
+#else
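+	/*
+	 * The ColdFire exception stack frame lays things out
+	 * differently from classic m68k: the MMU fault registers are
+	 * saved as well, and the fault status bits (fs1/fs2) are
+	 * split around the vector number in the hardware frame word.
+	 */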
+	unsigned long  mmuar;
+	unsigned long  mmusr;
+	unsigned format :  4; /* frame format specifier */
+	unsigned fs2    :  2;
+	unsigned vector :  8;
+	unsigned fs1    :  2;
+	unsigned short sr;
+	unsigned long  pc;
+#endif
 };
 
 /*
--- a/include/asm-m68k/raw_io.h
+++ b/include/asm-m68k/raw_io.h
@@ -77,6 +77,7 @@ static inline void raw_outsb(volatile u8
 		out_8(port, *buf++);
 }
 
+#ifndef CONFIG_COLDFIRE
 static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
 {
 	unsigned int tmp;
@@ -342,6 +343,63 @@ static inline void raw_outsw_swapw(volat
 		: "d0", "a0", "a1", "d6");
 }
 
+
+#else /* CONFIG_COLDFIRE */
+
+static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr; i++)
+		*buf++ = raw_inw(port);
+}
+
+static inline void raw_outsw(volatile u16 *port, const u16 *buf,
+			     unsigned int nr)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr; i++, buf++)
+		raw_outw(*buf, port);
+}
+
+static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr; i++)
+		*buf++ = raw_inl(port);
+}
+
+static inline void raw_outsl(volatile u32 *port, const u32 *buf,
+			     unsigned int nr)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr; i++, buf++)
+		raw_outl(*buf, port);
+}
+
+static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
+				  unsigned int nr)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr; i++)
+		*buf++ = in_le16(port);
+
+}
+
+static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
+				   unsigned int nr)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr; i++, buf++)
+		out_le16(port, *buf);
+}
+#endif /* CONFIG_COLDFIRE */
+
 #endif /* __KERNEL__ */
 
 #endif /* _RAW_IO_H */
--- a/include/asm-m68k/segment.h
+++ b/include/asm-m68k/segment.h
@@ -29,6 +29,7 @@ typedef struct {
 * Get/set the SFC/DFC registers for MOVES instructions
 */
 
+#ifndef CONFIG_COLDFIRE
 static inline mm_segment_t get_fs(void)
 {
 	mm_segment_t _v;
@@ -50,6 +51,15 @@ static inline void set_fs(mm_segment_t v
 		      : /* no outputs */ : "r" (val.seg) : "memory");
 }
 
+#else /* CONFIG_COLDFIRE */
+
+#include <linux/sched.h>
+
+#define get_fs()	(current->thread.fs)
+#define set_fs(val)	(current->thread.fs = (val))
+#define get_ds()	(KERNEL_DS)
+
+#endif /* CONFIG_COLDFIRE */
+
 #define segment_eq(a,b) ((a).seg == (b).seg)
 
 #endif /* __ASSEMBLY__ */
--- a/include/asm-m68k/setup.h
+++ b/include/asm-m68k/setup.h
@@ -40,6 +40,7 @@
 #define MACH_HP300    9
 #define MACH_Q40     10
 #define MACH_SUN3X   11
+#define MACH_CFMMU   12
 
 #define COMMAND_LINE_SIZE 256
 
@@ -189,6 +190,14 @@ extern unsigned long m68k_machtype;
 # define MACH_TYPE (MACH_SUN3X)
 #endif
 
+#if !defined(CONFIG_COLDFIRE)
+# define MACH_IS_COLDFIRE (0)
+#else
+# define CONFIG_COLDFIRE_ONLY
+# define MACH_IS_COLDFIRE (1)
+# define MACH_TYPE (MACH_CFMMU)
+#endif
+
 #ifndef MACH_TYPE
 # define MACH_TYPE (m68k_machtype)
 #endif
@@ -211,23 +220,31 @@ extern unsigned long m68k_machtype;
 #define CPUB_68030	1
 #define CPUB_68040	2
 #define CPUB_68060	3
+#define CPUB_CFV4E	4
 
 #define CPU_68020	(1<<CPUB_68020)
 #define CPU_68030	(1<<CPUB_68030)
 #define CPU_68040	(1<<CPUB_68040)
 #define CPU_68060	(1<<CPUB_68060)
+#define CPU_CFV4E	(1<<CPUB_CFV4E)
--- a/include/asm-m68k/signal.h
+++ b/include/asm-m68k/signal.h
+#ifndef CONFIG_COLDFIRE
 #define __HAVE_ARCH_SIG_BITOPS
 
 static inline void sigaddset(sigset_t *set, int _sig)
@@ -200,6 +201,10 @@ static inline int sigfindinword(unsigned
 struct pt_regs;
 extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
 
+#else
+
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+#endif /* CONFIG_COLDFIRE */
+
 #endif /* __KERNEL__ */
 
--- a/include/asm-m68k/string.h
+++ b/include/asm-m68k/string.h
@@ -93,6 +93,7 @@ static inline char *strchr(const char *s
 	return (char *)s - 1;
 }
 
+#ifndef CONFIG_COLDFIRE
 #define __HAVE_ARCH_STRCMP
 static inline int strcmp(const char *cs, const char *ct)
 {
@@ -110,6 +111,7 @@ static inline int strcmp(const char *cs,
 		: "+a" (cs), "+a" (ct), "=d" (res));
 	return res;
 }
+#endif
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *, int, __kernel_size_t);
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -63,16 +63,25 @@ asmlinkage void resume(void);
 #define smp_read_barrier_depends()	((void)0)
 
 /* interrupt control.. */
-#if 0
-#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
-#else
 #include <linux/hardirq.h>
+#ifndef CONFIG_COLDFIRE
 #define local_irq_enable() ({						\
 	if (MACH_IS_Q40 || !hardirq_count())				\
 		asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
 })
-#endif
 #define local_irq_disable() asm volatile ("oriw  #0x0700,%%sr": : : "memory")
+#else /* CONFIG_COLDFIRE */
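+/*
+ * ColdFire rewrites the SR through a d0 scratch register instead of
+ * an immediate andiw/oriw to the SR: clearing the IPL field (SR bits
+ * 8-10, mask 0xf8ff) enables all interrupt levels, and setting IPL
+ * to 7 (0x0700) masks every maskable interrupt.
+ */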
+#define local_irq_enable()					\
+	asm volatile ("move.w %%sr, %%d0\n\t"			\
+		      "andil #0xf8ff,%%d0\n\t"			\
+		      "move.w %%d0, %%sr\n"			\
+		      : : : "cc", "d0", "memory")
+#define local_irq_disable()					\
+	asm volatile ("move %/sr,%%d0\n\t"			\
+		      "ori.l #0x0700,%%d0\n\t"			\
+		      "move %%d0,%/sr\n"			\
+		      : : : "cc", "%d0", "memory")
+#endif
 
 #define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
 #define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -58,5 +58,6 @@ struct thread_info {
 #define TIF_DELAYED_TRACE	14	/* single step a syscall */
 #define TIF_SYSCALL_TRACE	15	/* syscall trace active */
 #define TIF_MEMDIE		16
+#define TIF_FREEZE		17	/* freezing processes */
 
 #endif	/* _ASM_M68K_THREAD_INFO_H */
--- a/include/asm-m68k/tlbflush.h
+++ b/include/asm-m68k/tlbflush.h
@@ -2,7 +2,7 @@
 #define _M68K_TLBFLUSH_H
 
 
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 
 #include <asm/current.h>
 
@@ -92,7 +92,12 @@ static inline void flush_tlb_kernel_rang
 	flush_tlb_all();
 }
 
-#else
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+}
+
+#elif defined(CONFIG_SUN3)
 
 /* Reserved PMEGs. */
@@ -214,6 +219,13 @@ static inline void flush_tlb_kernel_page
 	sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
 }
 
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+}
+
+#else /* CONFIG_COLDFIRE */
+#include <asm/cf_tlbflush.h>
 #endif
 
 #endif /* _M68K_TLBFLUSH_H */
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -1,6 +1,9 @@
 #ifndef __M68K_UACCESS_H
 #define __M68K_UACCESS_H
 
+#ifdef CONFIG_COLDFIRE
+#include <asm/cf_uaccess.h>
+#else
 /*
 * User space memory access functions
 */
@@ -367,4 +370,5 @@ unsigned long __clear_user(void __user *
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
+#endif /* CONFIG_COLDFIRE */
 #endif /* _M68K_UACCESS_H */