Diffstat (limited to 'target/linux/ifxmips/files/arch/mips/danube')
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/Kconfig      |  36
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/Makefile     |  10
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/dma-core.c   | 758
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/interrupt.c  | 219
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/pci.c        | 319
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/pmu.c        |  45
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/prom.c       |  81
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/reset.c      |  66
-rw-r--r--  target/linux/ifxmips/files/arch/mips/danube/setup.c      | 177
9 files changed, 1711 insertions, 0 deletions
diff --git a/target/linux/ifxmips/files/arch/mips/danube/Kconfig b/target/linux/ifxmips/files/arch/mips/danube/Kconfig
new file mode 100644
index 000000000..1c5ad72ca
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/Kconfig
@@ -0,0 +1,36 @@
+# Copyright 2007 John Crispin <blogic@openwrt.org>
+
+menu "Danube built-in"
+
+config DANUBE_ASC_UART
+ bool "Danube asc uart"
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ default y
+
+config MTD_DANUBE
+ bool "Danube flash map"
+ default y
+
+config DANUBE_WDT
+ bool "Danube watchdog"
+ default y
+
+config DANUBE_LED
+ bool "Danube led"
+ default y
+
+config DANUBE_GPIO
+ bool "Danube gpio"
+ default y
+
+config DANUBE_SSC
+ bool "Danube ssc"
+ default y
+
+config DANUBE_EEPROM
+ bool "Danube eeprom"
+ default y
+
+endmenu
+
diff --git a/target/linux/ifxmips/files/arch/mips/danube/Makefile b/target/linux/ifxmips/files/arch/mips/danube/Makefile
new file mode 100644
index 000000000..fed8f2f45
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/Makefile
@@ -0,0 +1,10 @@
+#
+# Copyright 2007 openwrt.org
+# John Crispin <blogic@openwrt.org>
+#
+# Makefile for Infineon Danube
+#
+obj-y := reset.o prom.o setup.o interrupt.o dma-core.o
+
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_KGDB) += kgdb_serial.o
diff --git a/target/linux/ifxmips/files/arch/mips/danube/dma-core.c b/target/linux/ifxmips/files/arch/mips/danube/dma-core.c
new file mode 100644
index 000000000..7d29dbdc0
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/dma-core.c
@@ -0,0 +1,758 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/selection.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <asm/danube/danube.h>
+#include <asm/danube/danube_irq.h>
+#include <asm/danube/danube_dma.h>
+#include <asm/danube/danube_pmu.h>
+
+/* 25 descriptors per DMA channel: 4096 / 8 / 20 = 25.6 */
+#define DANUBE_DMA_DESCRIPTOR_OFFSET 25
+
+#define MAX_DMA_DEVICE_NUM 6 /*max ports connecting to dma */
+#define MAX_DMA_CHANNEL_NUM 20 /*max dma channels */
+#define DMA_INT_BUDGET 100 /*budget for interrupt handling */
+#define DMA_POLL_COUNTER 4 /*fix me, set the correct counter value here! */
+
+extern void mask_and_ack_danube_irq (unsigned int irq_nr);
+extern void enable_danube_irq (unsigned int irq_nr);
+extern void disable_danube_irq (unsigned int irq_nr);
+
+u64 *g_desc_list;
+_dma_device_info dma_devs[MAX_DMA_DEVICE_NUM];
+_dma_channel_info dma_chan[MAX_DMA_CHANNEL_NUM];
+
+char global_device_name[MAX_DMA_DEVICE_NUM][20] =
+ { {"PPE"}, {"DEU"}, {"SPI"}, {"SDIO"}, {"MCTRL0"}, {"MCTRL1"} };
+
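+/* each entry maps one peripheral channel (device name, direction) to its DMA interrupt line, relative channel number and priority */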
+_dma_chan_map default_dma_map[MAX_DMA_CHANNEL_NUM] = {
+ {"PPE", DANUBE_DMA_RX, 0, DANUBE_DMA_CH0_INT, 0},
+ {"PPE", DANUBE_DMA_TX, 0, DANUBE_DMA_CH1_INT, 0},
+ {"PPE", DANUBE_DMA_RX, 1, DANUBE_DMA_CH2_INT, 1},
+ {"PPE", DANUBE_DMA_TX, 1, DANUBE_DMA_CH3_INT, 1},
+ {"PPE", DANUBE_DMA_RX, 2, DANUBE_DMA_CH4_INT, 2},
+ {"PPE", DANUBE_DMA_TX, 2, DANUBE_DMA_CH5_INT, 2},
+ {"PPE", DANUBE_DMA_RX, 3, DANUBE_DMA_CH6_INT, 3},
+ {"PPE", DANUBE_DMA_TX, 3, DANUBE_DMA_CH7_INT, 3},
+ {"DEU", DANUBE_DMA_RX, 0, DANUBE_DMA_CH8_INT, 0},
+ {"DEU", DANUBE_DMA_TX, 0, DANUBE_DMA_CH9_INT, 0},
+ {"DEU", DANUBE_DMA_RX, 1, DANUBE_DMA_CH10_INT, 1},
+ {"DEU", DANUBE_DMA_TX, 1, DANUBE_DMA_CH11_INT, 1},
+ {"SPI", DANUBE_DMA_RX, 0, DANUBE_DMA_CH12_INT, 0},
+ {"SPI", DANUBE_DMA_TX, 0, DANUBE_DMA_CH13_INT, 0},
+ {"SDIO", DANUBE_DMA_RX, 0, DANUBE_DMA_CH14_INT, 0},
+ {"SDIO", DANUBE_DMA_TX, 0, DANUBE_DMA_CH15_INT, 0},
+ {"MCTRL0", DANUBE_DMA_RX, 0, DANUBE_DMA_CH16_INT, 0},
+ {"MCTRL0", DANUBE_DMA_TX, 0, DANUBE_DMA_CH17_INT, 0},
+ {"MCTRL1", DANUBE_DMA_RX, 1, DANUBE_DMA_CH18_INT, 1},
+ {"MCTRL1", DANUBE_DMA_TX, 1, DANUBE_DMA_CH19_INT, 1}
+};
+
+_dma_chan_map *chan_map = default_dma_map;
+volatile u32 g_danube_dma_int_status = 0;
+volatile int g_danube_dma_in_process = 0;/*0=not in process,1=in process*/
+
+void do_dma_tasklet (unsigned long);
+DECLARE_TASKLET (dma_tasklet, do_dma_tasklet, 0);
+
+u8*
+common_buffer_alloc (int len, int *byte_offset, void **opt)
+{
+ u8 *buffer = (u8 *) kmalloc (len * sizeof (u8), GFP_KERNEL);
+
+ *byte_offset = 0;
+
+ return buffer;
+}
+
+void
+common_buffer_free (u8 *dataptr, void *opt)
+{
+ if (dataptr)
+ kfree(dataptr);
+}
+
+void
+enable_ch_irq (_dma_channel_info *pCh)
+{
+ int chan_no = (int)(pCh - dma_chan);
+ int flag;
+
+ local_irq_save(flag);
+ writel(chan_no, DANUBE_DMA_CS);
+ writel(0x4a, DANUBE_DMA_CIE);
+ writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
+ local_irq_restore(flag);
+ enable_danube_irq(pCh->irq);
+}
+
+void
+disable_ch_irq (_dma_channel_info *pCh)
+{
+ int flag;
+ int chan_no = (int) (pCh - dma_chan);
+
+ local_irq_save(flag);
+ g_danube_dma_int_status &= ~(1 << chan_no);
+ writel(chan_no, DANUBE_DMA_CS);
+ writel(0, DANUBE_DMA_CIE);
+ writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN);
+ local_irq_restore(flag);
+ mask_and_ack_danube_irq(pCh->irq);
+}
+
+void
+open_chan (_dma_channel_info *pCh)
+{
+ int flag;
+ int chan_no = (int)(pCh - dma_chan);
+
+ local_irq_save(flag);
+ writel(chan_no, DANUBE_DMA_CS);
+ writel(readl(DANUBE_DMA_CCTRL) | 1, DANUBE_DMA_CCTRL);
+ if(pCh->dir == DANUBE_DMA_RX)
+ enable_ch_irq(pCh);
+ local_irq_restore(flag);
+}
+
+void
+close_chan(_dma_channel_info *pCh)
+{
+ int flag;
+ int chan_no = (int) (pCh - dma_chan);
+
+ local_irq_save(flag);
+ writel(chan_no, DANUBE_DMA_CS);
+ writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
+ disable_ch_irq(pCh);
+ local_irq_restore(flag);
+}
+
+void
+reset_chan (_dma_channel_info *pCh)
+{
+ int chan_no = (int) (pCh - dma_chan);
+
+ writel(chan_no, DANUBE_DMA_CS);
+ writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
+}
+
+void
+rx_chan_intr_handler (int chan_no)
+{
+ _dma_device_info *pDev = (_dma_device_info *)dma_chan[chan_no].dma_dev;
+ _dma_channel_info *pCh = &dma_chan[chan_no];
+ struct rx_desc *rx_desc_p;
+ int tmp;
+ int flag;
+
+ /*handle command complete interrupt */
+ rx_desc_p = (struct rx_desc*)pCh->desc_base + pCh->curr_desc;
+ if (rx_desc_p->status.field.OWN == CPU_OWN
+ && rx_desc_p->status.field.C
+ && rx_desc_p->status.field.data_length < 1536){
+ /* Everything is correct, so inform the upper layer */
+ pDev->current_rx_chan = pCh->rel_chan_no;
+ if(pDev->intr_handler)
+ pDev->intr_handler(pDev, RCV_INT);
+ pCh->weight--;
+ } else {
+ local_irq_save(flag);
+ tmp = readl(DANUBE_DMA_CS);
+ writel(chan_no, DANUBE_DMA_CS);
+ writel(readl(DANUBE_DMA_CIS) | 0x7e, DANUBE_DMA_CIS);
+ writel(tmp, DANUBE_DMA_CS);
+ g_danube_dma_int_status &= ~(1 << chan_no);
+ local_irq_restore(flag);
+ enable_danube_irq(dma_chan[chan_no].irq);
+ }
+}
+
+inline void
+tx_chan_intr_handler (int chan_no)
+{
+ _dma_device_info *pDev = (_dma_device_info*)dma_chan[chan_no].dma_dev;
+ _dma_channel_info *pCh = &dma_chan[chan_no];
+ int tmp;
+ int flag;
+
+ local_irq_save(flag);
+ tmp = readl(DANUBE_DMA_CS);
+ writel(chan_no, DANUBE_DMA_CS);
+ writel(readl(DANUBE_DMA_CIS) | 0x7e, DANUBE_DMA_CIS);
+ writel(tmp, DANUBE_DMA_CS);
+ g_danube_dma_int_status &= ~(1 << chan_no);
+ local_irq_restore(flag);
+ pDev->current_tx_chan = pCh->rel_chan_no;
+ if (pDev->intr_handler)
+ pDev->intr_handler(pDev, TRANSMIT_CPT_INT);
+}
+
+void
+do_dma_tasklet (unsigned long unused)
+{
+ int i;
+ int chan_no = 0;
+ int budget = DMA_INT_BUDGET;
+ int weight = 0;
+ int flag;
+
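+ /* service pending channels highest-weight first; reschedule the tasklet when
+ the budget runs out, and restore the default weights once all are used up */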
+ while (g_danube_dma_int_status)
+ {
+ if (budget-- < 0)
+ {
+ tasklet_schedule(&dma_tasklet);
+ return;
+ }
+ chan_no = -1;
+ weight = 0;
+ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
+ {
+ if ((g_danube_dma_int_status & (1 << i)) && dma_chan[i].weight > 0)
+ {
+ if (dma_chan[i].weight > weight)
+ {
+ chan_no = i;
+ weight = dma_chan[chan_no].weight;
+ }
+ }
+ }
+
+ if (chan_no >= 0)
+ {
+ if (chan_map[chan_no].dir == DANUBE_DMA_RX)
+ rx_chan_intr_handler(chan_no);
+ else
+ tx_chan_intr_handler(chan_no);
+ } else {
+ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
+ {
+ dma_chan[i].weight = dma_chan[i].default_weight;
+ }
+ }
+ }
+
+ local_irq_save(flag);
+ g_danube_dma_in_process = 0;
+ if (g_danube_dma_int_status)
+ {
+ g_danube_dma_in_process = 1;
+ tasklet_schedule(&dma_tasklet);
+ }
+ local_irq_restore(flag);
+}
+
+irqreturn_t
+dma_interrupt (int irq, void *dev_id)
+{
+ _dma_channel_info *pCh;
+ int chan_no = 0;
+ int tmp;
+
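+ /* only mark the channel as pending here and defer the real work to the tasklet */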
+ pCh = (_dma_channel_info*)dev_id;
+ chan_no = (int)(pCh - dma_chan);
+ if (chan_no < 0 || chan_no > 19)
+ BUG();
+
+ tmp = readl(DANUBE_DMA_IRNEN);
+ writel(0, DANUBE_DMA_IRNEN);
+ g_danube_dma_int_status |= 1 << chan_no;
+ writel(tmp, DANUBE_DMA_IRNEN);
+ mask_and_ack_danube_irq(irq);
+
+ if (!g_danube_dma_in_process)
+ {
+ g_danube_dma_in_process = 1;
+ tasklet_schedule(&dma_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+
+_dma_device_info*
+dma_device_reserve (char *dev_name)
+{
+ int i;
+
+ for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
+ {
+ if (strcmp(dev_name, dma_devs[i].device_name) == 0)
+ {
+ if (dma_devs[i].reserved)
+ return NULL;
+ dma_devs[i].reserved = 1;
+ break;
+ }
+ }
+
+ return &dma_devs[i];
+}
+
+void
+dma_device_release (_dma_device_info *dev)
+{
+ dev->reserved = 0;
+}
+
+void
+dma_device_register(_dma_device_info *dev)
+{
+ int i, j;
+ int chan_no = 0;
+ u8 *buffer;
+ int byte_offset;
+ int flag;
+ _dma_device_info *pDev;
+ _dma_channel_info *pCh;
+ struct rx_desc *rx_desc_p;
+ struct tx_desc *tx_desc_p;
+
+ for (i = 0; i < dev->max_tx_chan_num; i++)
+ {
+ pCh = dev->tx_chan[i];
+ if (pCh->control == DANUBE_DMA_CH_ON)
+ {
+ chan_no = (int)(pCh - dma_chan);
+ for (j = 0; j < pCh->desc_len; j++)
+ {
+ tx_desc_p = (struct tx_desc*)pCh->desc_base + j;
+ memset(tx_desc_p, 0, sizeof(struct tx_desc));
+ }
+ local_irq_save(flag);
+ writel(chan_no, DANUBE_DMA_CS);
+ /*check if the descriptor length is changed */
+ if (readl(DANUBE_DMA_CDLEN) != pCh->desc_len)
+ writel(pCh->desc_len, DANUBE_DMA_CDLEN);
+
+ writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
+ writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
+ while (readl(DANUBE_DMA_CCTRL) & 2){};
+ writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
+ writel(0x30100, DANUBE_DMA_CCTRL); /*reset and enable channel,enable channel later */
+ local_irq_restore(flag);
+ }
+ }
+
+ for (i = 0; i < dev->max_rx_chan_num; i++)
+ {
+ pCh = dev->rx_chan[i];
+ if (pCh->control == DANUBE_DMA_CH_ON)
+ {
+ chan_no = (int)(pCh - dma_chan);
+
+ for (j = 0; j < pCh->desc_len; j++)
+ {
+ rx_desc_p = (struct rx_desc*)pCh->desc_base + j;
+ pDev = (_dma_device_info*)(pCh->dma_dev);
+ buffer = pDev->buffer_alloc(pCh->packet_size, &byte_offset, (void*)&(pCh->opt[j]));
+ if (!buffer)
+ break;
+
+ dma_cache_inv((unsigned long) buffer, pCh->packet_size);
+
+ rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buffer);
+ rx_desc_p->status.word = 0;
+ rx_desc_p->status.field.byte_offset = byte_offset;
+ rx_desc_p->status.field.OWN = DMA_OWN;
+ rx_desc_p->status.field.data_length = pCh->packet_size;
+ }
+
+ local_irq_save(flag);
+ writel(chan_no, DANUBE_DMA_CS);
+ /*check if the descriptor length is changed */
+ if (readl(DANUBE_DMA_CDLEN) != pCh->desc_len)
+ writel(pCh->desc_len, DANUBE_DMA_CDLEN);
+ writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
+ writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
+ while (readl(DANUBE_DMA_CCTRL) & 2){};
+ writel(0x0a, DANUBE_DMA_CIE); /*fix me, should enable all the interrupts here? */
+ writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
+ writel(0x30000, DANUBE_DMA_CCTRL);
+ local_irq_restore(flag);
+ enable_danube_irq(dma_chan[chan_no].irq);
+ }
+ }
+}
+
+void
+dma_device_unregister (_dma_device_info *dev)
+{
+ int i, j;
+ int chan_no;
+ _dma_channel_info *pCh;
+ struct rx_desc *rx_desc_p;
+ struct tx_desc *tx_desc_p;
+ int flag;
+
+ for (i = 0; i < dev->max_tx_chan_num; i++)
+ {
+ pCh = dev->tx_chan[i];
+ if (pCh->control == DANUBE_DMA_CH_ON)
+ {
+ chan_no = (int)(dev->tx_chan[i] - dma_chan);
+ local_irq_save (flag);
+ writel(chan_no, DANUBE_DMA_CS);
+ pCh->curr_desc = 0;
+ pCh->prev_desc = 0;
+ pCh->control = DANUBE_DMA_CH_OFF;
+ writel(0, DANUBE_DMA_CIE); /*fix me, should disable all the interrupts here? */
+ writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN); /*disable interrupts */
+ writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
+ while (readl(DANUBE_DMA_CCTRL) & 1) {};
+ local_irq_restore (flag);
+
+ for (j = 0; j < pCh->desc_len; j++)
+ {
+ tx_desc_p = (struct tx_desc*)pCh->desc_base + j;
+ if ((tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C)
+ || (tx_desc_p->status.field.OWN == DMA_OWN && tx_desc_p->status.field.data_length > 0))
+ {
+ dev->buffer_free ((u8 *) __va (tx_desc_p->Data_Pointer), (void*)pCh->opt[j]);
+ }
+ tx_desc_p->status.field.OWN = CPU_OWN;
+ memset (tx_desc_p, 0, sizeof (struct tx_desc));
+ }
+ //TODO: free buffers that were not transferred by the DMA
+ }
+ }
+
+ for (i = 0; i < dev->max_rx_chan_num; i++)
+ {
+ pCh = dev->rx_chan[i];
+ chan_no = (int)(dev->rx_chan[i] - dma_chan);
+ disable_danube_irq(pCh->irq);
+
+ local_irq_save(flag);
+ g_danube_dma_int_status &= ~(1 << chan_no);
+ pCh->curr_desc = 0;
+ pCh->prev_desc = 0;
+ pCh->control = DANUBE_DMA_CH_OFF;
+
+ writel(chan_no, DANUBE_DMA_CS);
+ writel(0, DANUBE_DMA_CIE); /*fix me, should disable all the interrupts here? */
+ writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN); /*disable interrupts */
+ writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
+ while (readl(DANUBE_DMA_CCTRL) & 1) {};
+
+ local_irq_restore (flag);
+ for (j = 0; j < pCh->desc_len; j++)
+ {
+ rx_desc_p = (struct rx_desc *) pCh->desc_base + j;
+ if ((rx_desc_p->status.field.OWN == CPU_OWN
+ && rx_desc_p->status.field.C)
+ || (rx_desc_p->status.field.OWN == DMA_OWN
+ && rx_desc_p->status.field.data_length > 0)) {
+ dev->buffer_free((u8 *) __va(rx_desc_p->Data_Pointer), (void *) pCh->opt[j]);
+ }
+ }
+ }
+}
+
+int
+dma_device_read (struct dma_device_info *dma_dev, u8 ** dataptr, void **opt)
+{
+ u8 *buf;
+ int len;
+ int byte_offset = 0;
+ void *p = NULL;
+ _dma_channel_info *pCh = dma_dev->rx_chan[dma_dev->current_rx_chan];
+ struct rx_desc *rx_desc_p;
+
+ /*get the rx data first */
+ rx_desc_p = (struct rx_desc *) pCh->desc_base + pCh->curr_desc;
+ if (!(rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C))
+ {
+ return 0;
+ }
+
+ buf = (u8 *) __va (rx_desc_p->Data_Pointer);
+ *(u32*)dataptr = (u32)buf;
+ len = rx_desc_p->status.field.data_length;
+
+ if (opt)
+ {
+ *(int*)opt = (int)pCh->opt[pCh->curr_desc];
+ }
+
+ /*replace with a new allocated buffer */
+ buf = dma_dev->buffer_alloc(pCh->packet_size, &byte_offset, &p);
+
+ if (buf)
+ {
+ dma_cache_inv ((unsigned long) buf,
+ pCh->packet_size);
+ pCh->opt[pCh->curr_desc] = p;
+ wmb ();
+
+ rx_desc_p->Data_Pointer = (u32) CPHYSADDR ((u32) buf);
+ rx_desc_p->status.word = (DMA_OWN << 31) | ((byte_offset) << 23) | pCh->packet_size;
+ wmb ();
+ } else {
+ *(u32 *) dataptr = 0;
+ if (opt)
+ *(int *) opt = 0;
+ len = 0;
+ }
+
+ /*increase the curr_desc pointer */
+ pCh->curr_desc++;
+ if (pCh->curr_desc == pCh->desc_len)
+ pCh->curr_desc = 0;
+
+ return len;
+}
+
+int
+dma_device_write (struct dma_device_info *dma_dev, u8 * dataptr, int len, void *opt)
+{
+ int flag;
+ u32 tmp, byte_offset;
+ _dma_channel_info *pCh;
+ int chan_no;
+ struct tx_desc *tx_desc_p;
+ local_irq_save (flag);
+
+ pCh = dma_dev->tx_chan[dma_dev->current_tx_chan];
+ chan_no = (int)(pCh - (_dma_channel_info *) dma_chan);
+
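+ /* first reclaim descriptors the DMA has completed and free their buffers */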
+ tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->prev_desc;
+ while (tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C)
+ {
+ dma_dev->buffer_free((u8 *) __va (tx_desc_p->Data_Pointer), pCh->opt[pCh->prev_desc]);
+ memset(tx_desc_p, 0, sizeof (struct tx_desc));
+ pCh->prev_desc = (pCh->prev_desc + 1) % (pCh->desc_len);
+ tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->prev_desc;
+ }
+ tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->curr_desc;
+ /*Check whether this descriptor is available */
+ if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C)
+ {
+ /* if not, then tell the upper layer device */
+ dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT);
+ local_irq_restore(flag);
+ printk (KERN_INFO "%s %d: failed to write!\n", __func__, __LINE__);
+
+ return 0;
+ }
+ pCh->opt[pCh->curr_desc] = opt;
+ /* byte offset: adjusts the start address of the data buffer, must be a multiple of the burst length */
+ byte_offset = ((u32) CPHYSADDR ((u32) dataptr)) % ((dma_dev->tx_burst_len) * 4);
+ dma_cache_wback ((unsigned long) dataptr, len);
+ wmb ();
+ tx_desc_p->Data_Pointer = (u32) CPHYSADDR ((u32) dataptr) - byte_offset;
+ wmb ();
+ tx_desc_p->status.word = (DMA_OWN << 31) | DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | ((byte_offset) << 23) | len;
+ wmb ();
+
+ pCh->curr_desc++;
+ if (pCh->curr_desc == pCh->desc_len)
+ pCh->curr_desc = 0;
+
+ /*Check whether this descriptor is available */
+ tx_desc_p = (struct tx_desc *) pCh->desc_base + pCh->curr_desc;
+ if (tx_desc_p->status.field.OWN == DMA_OWN)
+ {
+ /* if not, then tell the upper layer device */
+ dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT);
+ }
+
+ writel(chan_no, DANUBE_DMA_CS);
+ tmp = readl(DANUBE_DMA_CCTRL);
+
+ if (!(tmp & 1))
+ pCh->open (pCh);
+
+ local_irq_restore (flag);
+
+ return len;
+}
+
+int
+map_dma_chan(_dma_chan_map *map)
+{
+ int i, j;
+ int result;
+
+ for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
+ {
+ strcpy(dma_devs[i].device_name, global_device_name[i]);
+ }
+
+ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
+ {
+ dma_chan[i].irq = map[i].irq;
+ result = request_irq(dma_chan[i].irq, dma_interrupt, SA_INTERRUPT, "dma-core", (void*)&dma_chan[i]);
+ if (result)
+ {
+ printk("error, cannot get dma_irq!\n");
+ while (--i >= 0)
+ free_irq(dma_chan[i].irq, (void *)&dma_chan[i]);
+
+ return -EFAULT;
+ }
+ }
+
+ for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
+ {
+ dma_devs[i].num_tx_chan = 0; /* no tx channels assigned yet */
+ dma_devs[i].num_rx_chan = 0; /* no rx channels assigned yet */
+ dma_devs[i].max_rx_chan_num = 0;
+ dma_devs[i].max_tx_chan_num = 0;
+ dma_devs[i].buffer_alloc = &common_buffer_alloc;
+ dma_devs[i].buffer_free = &common_buffer_free;
+ dma_devs[i].intr_handler = NULL;
+ dma_devs[i].tx_burst_len = 4;
+ dma_devs[i].rx_burst_len = 4;
+ if (i == 0)
+ {
+ writel(0, DANUBE_DMA_PS);
+ writel(readl(DANUBE_DMA_PCTRL) | ((0xf << 8) | (1 << 6)), DANUBE_DMA_PCTRL); /*enable dma drop */
+ }
+
+ if (i == 1)
+ {
+ writel(1, DANUBE_DMA_PS);
+ writel(0x14, DANUBE_DMA_PCTRL); /*deu port setting */
+ }
+
+ for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++)
+ {
+ dma_chan[j].byte_offset = 0;
+ dma_chan[j].open = &open_chan;
+ dma_chan[j].close = &close_chan;
+ dma_chan[j].reset = &reset_chan;
+ dma_chan[j].enable_irq = &enable_ch_irq;
+ dma_chan[j].disable_irq = &disable_ch_irq;
+ dma_chan[j].rel_chan_no = map[j].rel_chan_no;
+ dma_chan[j].control = DANUBE_DMA_CH_OFF;
+ dma_chan[j].default_weight = DANUBE_DMA_CH_DEFAULT_WEIGHT;
+ dma_chan[j].weight = dma_chan[j].default_weight;
+ dma_chan[j].curr_desc = 0;
+ dma_chan[j].prev_desc = 0;
+ }
+
+ for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++)
+ {
+ if (strcmp(dma_devs[i].device_name, map[j].dev_name) == 0)
+ {
+ if (map[j].dir == DANUBE_DMA_RX)
+ {
+ dma_chan[j].dir = DANUBE_DMA_RX;
+ dma_devs[i].max_rx_chan_num++;
+ dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1] = &dma_chan[j];
+ dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1]->pri = map[j].pri;
+ dma_chan[j].dma_dev = (void*)&dma_devs[i];
+ } else if(map[j].dir == DANUBE_DMA_TX)
+ { /*TX direction */
+ dma_chan[j].dir = DANUBE_DMA_TX;
+ dma_devs[i].max_tx_chan_num++;
+ dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1] = &dma_chan[j];
+ dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1]->pri = map[j].pri;
+ dma_chan[j].dma_dev = (void*)&dma_devs[i];
+ } else {
+ printk ("WRONG DMA MAP!\n");
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+dma_chip_init(void)
+{
+ int i;
+
+ // enable DMA from PMU
+ danube_pmu_enable(DANUBE_PMU_PWDCR_DMA);
+
+ // reset DMA
+ writel(readl(DANUBE_DMA_CTRL) | 1, DANUBE_DMA_CTRL);
+
+ // disable all interrupts
+ writel(0, DANUBE_DMA_IRNEN);
+
+ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
+ {
+ writel(i, DANUBE_DMA_CS);
+ writel(0x2, DANUBE_DMA_CCTRL);
+ writel(0x80000040, DANUBE_DMA_CPOLL);
+ writel(readl(DANUBE_DMA_CCTRL) & ~0x1, DANUBE_DMA_CCTRL);
+
+ }
+}
+
+int
+danube_dma_init (void)
+{
+ int i;
+
+ dma_chip_init();
+ if (map_dma_chan(default_dma_map))
+ BUG();
+
+ g_desc_list = (u64*)KSEG1ADDR(__get_free_page(GFP_DMA));
+
+ if (g_desc_list == NULL)
+ {
+ printk("no memory for desriptor\n");
+ return -ENOMEM;
+ }
+
+ memset(g_desc_list, 0, PAGE_SIZE);
+
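+ /* carve the page into one ring of DANUBE_DMA_DESCRIPTOR_OFFSET 8-byte descriptors per channel */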
+ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
+ {
+ dma_chan[i].desc_base = (u32)g_desc_list + i * DANUBE_DMA_DESCRIPTOR_OFFSET * 8;
+ dma_chan[i].curr_desc = 0;
+ dma_chan[i].desc_len = DANUBE_DMA_DESCRIPTOR_OFFSET;
+
+ writel(i, DANUBE_DMA_CS);
+ writel((u32)CPHYSADDR(dma_chan[i].desc_base), DANUBE_DMA_CDBA);
+ writel(dma_chan[i].desc_len, DANUBE_DMA_CDLEN);
+ }
+
+ return 0;
+}
+
+arch_initcall(danube_dma_init);
+
+void
+dma_cleanup(void)
+{
+ int i;
+
+ free_page(KSEG0ADDR((unsigned long) g_desc_list));
+ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
+ free_irq(dma_chan[i].irq, (void *)&dma_chan[i]);
+}
+
+EXPORT_SYMBOL (dma_device_reserve);
+EXPORT_SYMBOL (dma_device_release);
+EXPORT_SYMBOL (dma_device_register);
+EXPORT_SYMBOL (dma_device_unregister);
+EXPORT_SYMBOL (dma_device_read);
+EXPORT_SYMBOL (dma_device_write);
+
+MODULE_LICENSE ("GPL");
diff --git a/target/linux/ifxmips/files/arch/mips/danube/interrupt.c b/target/linux/ifxmips/files/arch/mips/danube/interrupt.c
new file mode 100644
index 000000000..c266608ea
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/interrupt.c
@@ -0,0 +1,219 @@
+/*
+ * arch/mips/danube/interrupt.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2005 Wu Qi Ming infineon
+ *
+ * Rewrite of Infineon Danube code, thanks to infineon for the support,
+ * software and hardware
+ *
+ * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/danube/danube.h>
+#include <asm/danube/danube_irq.h>
+#include <asm/irq_cpu.h>
+
+
+void
+disable_danube_irq (unsigned int irq_nr)
+{
+ int i;
+ u32 *danube_ier = DANUBE_ICU_IM0_IER;
+
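+ /* walk the five ICU interrupt modules (IM0-IM4) to find the one that owns this IRQ */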
+ irq_nr -= INT_NUM_IRQ0;
+ for (i = 0; i <= 4; i++)
+ {
+ if (irq_nr < INT_NUM_IM_OFFSET){
+ writel(readl(danube_ier) & ~(1 << irq_nr ), danube_ier);
+ return;
+ }
+ danube_ier += DANUBE_ICU_OFFSET;
+ irq_nr -= INT_NUM_IM_OFFSET;
+ }
+}
+EXPORT_SYMBOL (disable_danube_irq);
+
+void
+mask_and_ack_danube_irq (unsigned int irq_nr)
+{
+ int i;
+ u32 *danube_ier = DANUBE_ICU_IM0_IER;
+ u32 *danube_isr = DANUBE_ICU_IM0_ISR;
+
+ irq_nr -= INT_NUM_IRQ0;
+ for (i = 0; i <= 4; i++)
+ {
+ if (irq_nr < INT_NUM_IM_OFFSET)
+ {
+ writel(readl(danube_ier) & ~(1 << irq_nr ), danube_ier);
+ writel((1 << irq_nr ), danube_isr);
+ return;
+ }
+ danube_ier += DANUBE_ICU_OFFSET;
+ danube_isr += DANUBE_ICU_OFFSET;
+ irq_nr -= INT_NUM_IM_OFFSET;
+ }
+}
+EXPORT_SYMBOL (mask_and_ack_danube_irq);
+
+void
+enable_danube_irq (unsigned int irq_nr)
+{
+ int i;
+ u32 *danube_ier = DANUBE_ICU_IM0_IER;
+
+ irq_nr -= INT_NUM_IRQ0;
+ for (i = 0; i <= 4; i++)
+ {
+ if (irq_nr < INT_NUM_IM_OFFSET)
+ {
+ writel(readl(danube_ier) | (1 << irq_nr ), danube_ier);
+ return;
+ }
+ danube_ier += DANUBE_ICU_OFFSET;
+ irq_nr -= INT_NUM_IM_OFFSET;
+ }
+}
+EXPORT_SYMBOL (enable_danube_irq);
+
+static unsigned int
+startup_danube_irq (unsigned int irq)
+{
+ enable_danube_irq (irq);
+ return 0;
+}
+
+static void
+end_danube_irq (unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+ enable_danube_irq (irq);
+}
+
+static struct hw_interrupt_type danube_irq_type = {
+ "DANUBE",
+ .startup = startup_danube_irq,
+ .enable = enable_danube_irq,
+ .disable = disable_danube_irq,
+ .unmask = enable_danube_irq,
+ .ack = end_danube_irq,
+ .mask = disable_danube_irq,
+ .mask_ack = mask_and_ack_danube_irq,
+ .end = end_danube_irq,
+};
+
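+/* uses the MIPS32 clz instruction: returns 31 - clz(x), i.e. the index of the highest set bit */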
+static inline int
+ls1bit32(unsigned long x)
+{
+ __asm__ (
+ " .set push \n"
+ " .set mips32 \n"
+ " clz %0, %1 \n"
+ " .set pop \n"
+ : "=r" (x)
+ : "r" (x));
+
+ return 31 - x;
+}
+
+void
+danube_hw_irqdispatch (int module)
+{
+ u32 irq;
+
+ irq = readl(DANUBE_ICU_IM0_IOSR + (module * DANUBE_ICU_OFFSET));
+ if (irq == 0)
+ return;
+
+ irq = ls1bit32 (irq);
+ do_IRQ ((int) irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));
+
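+ /* IM0 IRQ 22 is the PCI/PC-Card interrupt routed through the EBU (see pci.c); acknowledge it in the EBU status register as well */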
+ if ((irq == 22) && (module == 0)){
+ writel(readl(DANUBE_EBU_PCC_ISTAT) | 0x10, DANUBE_EBU_PCC_ISTAT);
+ }
+}
+
+asmlinkage void
+plat_irq_dispatch (void)
+{
+ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+ unsigned int i;
+
+ if (pending & CAUSEF_IP7){
+ do_IRQ(MIPS_CPU_TIMER_IRQ);
+ goto out;
+ } else {
+ for (i = 0; i < 5; i++)
+ {
+ if (pending & (CAUSEF_IP2 << i))
+ {
+ danube_hw_irqdispatch(i);
+ goto out;
+ }
+ }
+ }
+ printk("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status());
+
+out:
+ return;
+}
+
+static struct irqaction cascade = {
+ .handler = no_action,
+ .flags = IRQF_DISABLED,
+ .name = "cascade",
+};
+
+void __init
+arch_init_irq(void)
+{
+ int i;
+
+ for (i = 0; i < 5; i++)
+ {
+ writel(0, DANUBE_ICU_IM0_IER + (i * DANUBE_ICU_OFFSET));
+ }
+
+ mips_cpu_irq_init();
+
+ for (i = 2; i <= 6; i++)
+ {
+ setup_irq(i, &cascade);
+ }
+
+ for (i = INT_NUM_IRQ0; i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
+ {
+#if 0
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].action = NULL;
+ irq_desc[i].depth = 1;
+#endif
+ set_irq_chip_and_handler(i, &danube_irq_type, handle_level_irq);
+ }
+
+ set_c0_status (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
+}
diff --git a/target/linux/ifxmips/files/arch/mips/danube/pci.c b/target/linux/ifxmips/files/arch/mips/danube/pci.c
new file mode 100644
index 000000000..1896336d8
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/pci.c
@@ -0,0 +1,319 @@
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <asm/danube/danube.h>
+#include <asm/danube/danube_irq.h>
+#include <asm/addrspace.h>
+#include <linux/vmalloc.h>
+
+#define DANUBE_PCI_MEM_BASE 0x18000000
+#define DANUBE_PCI_MEM_SIZE 0x02000000
+#define DANUBE_PCI_IO_BASE 0x1AE00000
+#define DANUBE_PCI_IO_SIZE 0x00200000
+
+#define DANUBE_PCI_CFG_BUSNUM_SHF 16
+#define DANUBE_PCI_CFG_DEVNUM_SHF 11
+#define DANUBE_PCI_CFG_FUNNUM_SHF 8
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+static int danube_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
+static int danube_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
+
+struct pci_ops danube_pci_ops = {
+ .read = danube_pci_read_config_dword,
+ .write = danube_pci_write_config_dword
+};
+
+static struct resource pci_io_resource = {
+ .name = "io pci IO space",
+ .start = DANUBE_PCI_IO_BASE,
+ .end = DANUBE_PCI_IO_BASE + DANUBE_PCI_IO_SIZE - 1,
+ .flags = IORESOURCE_IO
+};
+
+static struct resource pci_mem_resource = {
+ .name = "ext pci memory space",
+ .start = DANUBE_PCI_MEM_BASE,
+ .end = DANUBE_PCI_MEM_BASE + DANUBE_PCI_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM
+};
+
+static struct pci_controller danube_pci_controller = {
+ .pci_ops = &danube_pci_ops,
+ .mem_resource = &pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &pci_io_resource,
+ .io_offset = 0x00000000UL,
+};
+
+static u32 danube_pci_mapped_cfg;
+
+static int
+danube_pci_config_access(unsigned char access_type,
+ struct pci_bus *bus, unsigned int devfn, unsigned int where, u32 *data)
+{
+ unsigned long cfg_base;
+ unsigned long flags;
+
+ u32 temp;
+
+ /* the Danube supports slots 0 to 15 */
+ /* devfn 0 and 0x68 (AD29) are the Danube itself */
+ if ((bus->number != 0) || ((devfn & 0xf8) > 0x78)
+ || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68))
+ return 1;
+
+ local_irq_save(flags);
+
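+ /* config space address = mapped base | bus << 16 | devfn << 8 | aligned register offset */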
+ cfg_base = danube_pci_mapped_cfg;
+ cfg_base |= (bus->number << DANUBE_PCI_CFG_BUSNUM_SHF) |
+ (devfn << DANUBE_PCI_CFG_FUNNUM_SHF) | (where & ~0x3);
+
+ /* Perform access */
+ if (access_type == PCI_ACCESS_WRITE)
+ {
+#ifdef CONFIG_DANUBE_PCI_HW_SWAP
+ writel(swab32(*data), ((u32*)cfg_base));
+#else
+ writel(*data, ((u32*)cfg_base));
+#endif
+ } else {
+ *data = readl(((u32*)(cfg_base)));
+#ifdef CONFIG_DANUBE_PCI_HW_SWAP
+ *data = swab32(*data);
+#endif
+ }
+ wmb();
+
+ /* clean possible Master abort */
+ cfg_base = (danube_pci_mapped_cfg | (0x0 << DANUBE_PCI_CFG_FUNNUM_SHF)) + 4;
+ temp = readl(((u32*)(cfg_base)));
+#ifdef CONFIG_DANUBE_PCI_HW_SWAP
+ temp = swab32 (temp);
+#endif
+ cfg_base = (danube_pci_mapped_cfg | (0x68 << DANUBE_PCI_CFG_FUNNUM_SHF)) + 4;
+ writel(temp, ((u32*)cfg_base));
+
+ local_irq_restore(flags);
+
+ if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ))
+ return 1;
+
+ return 0;
+}
+
+static int danube_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 * val)
+{
+ u32 data = 0;
+
+ if (danube_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int danube_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data = 0;
+
+ if (size == 4)
+ {
+ data = val;
+ } else {
+ if (danube_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ }
+
+ if (danube_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+int pcibios_plat_dev_init(struct pci_dev *dev){
+ u8 pin;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+
+ switch(pin) {
+ case 0:
+ break;
+ case 1:
+ //falling edge level triggered:0x4, low level:0xc, rising edge:0x2
+ printk("%s:%s[%d] %08X \n", __FILE__, __func__, __LINE__, dev->irq);
+ writel(readl(DANUBE_EBU_PCC_CON) | 0xc, DANUBE_EBU_PCC_CON);
+ writel(readl(DANUBE_EBU_PCC_IEN) | 0x10, DANUBE_EBU_PCC_IEN);
+ break;
+ case 2:
+ case 3:
+ case 4:
+ printk ("WARNING: interrupt pin %d not supported yet!\n", pin);
+ default:
+ printk ("WARNING: invalid interrupt pin %d\n", pin);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void __init danube_pci_startup (void){
+ /* initialize the first PCI device, the Danube itself */
+ u32 temp_buffer;
+ /*TODO: trigger reset */
+ writel(readl(DANUBE_CGU_IFCCR) & ~0xf00000, DANUBE_CGU_IFCCR);
+ writel(readl(DANUBE_CGU_IFCCR) | 0x800000, DANUBE_CGU_IFCCR);
+ /* PCIS of IF_CLK of CGU : 1 =>PCI Clock output
+ 0 =>clock input
+ PADsel of PCI_CR of CGU : 1 =>From CGU
+ : 0 =>From pad
+ */
+ writel(readl(DANUBE_CGU_IFCCR) | (1 << 16), DANUBE_CGU_IFCCR);
+ writel((1 << 31) | (1 << 30), DANUBE_CGU_PCICR);
+
+ /* prepare GPIO */
+ /* PCI_RST: P1.5 ALT 01 */
+ //pliu20060613: start
+ writel(readl(DANUBE_GPIO_P1_OUT) | (1 << 5), DANUBE_GPIO_P1_OUT);
+ writel(readl(DANUBE_GPIO_P1_OD) | (1 << 5), DANUBE_GPIO_P1_OD);
+ writel(readl(DANUBE_GPIO_P1_DIR) | (1 << 5), DANUBE_GPIO_P1_DIR);
+ writel(readl(DANUBE_GPIO_P1_ALTSEL1) & ~(1 << 5), DANUBE_GPIO_P1_ALTSEL1);
+ writel(readl(DANUBE_GPIO_P1_ALTSEL0) & ~(1 << 5), DANUBE_GPIO_P1_ALTSEL0);
+ //pliu20060613: end
+ /* PCI_REQ1: P1.13 ALT 01 */
+ /* PCI_GNT1: P1.14 ALT 01 */
+ writel(readl(DANUBE_GPIO_P1_DIR) & ~0x2000, DANUBE_GPIO_P1_DIR);
+ writel(readl(DANUBE_GPIO_P1_DIR) | 0x4000, DANUBE_GPIO_P1_DIR);
+ writel(readl(DANUBE_GPIO_P1_ALTSEL1) & ~0x6000, DANUBE_GPIO_P1_ALTSEL1);
+ writel(readl(DANUBE_GPIO_P1_ALTSEL0) | 0x6000, DANUBE_GPIO_P1_ALTSEL0);
+ /* PCI_REQ2: P1.15 ALT 10 */
+ /* PCI_GNT2: P1.7 ALT 10 */
+
+
+ /* enable auto-switching between PCI and EBU */
+ writel(0xa, PCI_CR_CLK_CTRL);
+ /* busy, i.e. configuration is not done, PCI access has to be retried */
+ writel(readl(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD);
+ wmb ();
+ /* BUS Master/IO/MEM access */
+ writel(readl(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD);
+
+ temp_buffer = readl(PCI_CR_PC_ARB);
+ /* enable external 2 PCI masters */
+ temp_buffer &= (~(0xf << 16));
+ /* enable internal arbiter */
+ temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
+ /* enable internal PCI master request */
+ temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS));
+
+ /* enable EBU request */
+ temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS));
+
+ /* enable all external masters request */
+ temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS));
+ writel(temp_buffer, PCI_CR_PC_ARB);
+
+ wmb ();
+
+ /* FPI ==> PCI MEM address mapping */
+ /* base: 0xb8000000 == > 0x18000000 */
+ /* size: 8x4M = 32M */
+ writel(0x18000000, PCI_CR_FCI_ADDR_MAP0);
+ writel(0x18400000, PCI_CR_FCI_ADDR_MAP1);
+ writel(0x18800000, PCI_CR_FCI_ADDR_MAP2);
+ writel(0x18c00000, PCI_CR_FCI_ADDR_MAP3);
+ writel(0x19000000, PCI_CR_FCI_ADDR_MAP4);
+ writel(0x19400000, PCI_CR_FCI_ADDR_MAP5);
+ writel(0x19800000, PCI_CR_FCI_ADDR_MAP6);
+ writel(0x19c00000, PCI_CR_FCI_ADDR_MAP7);
+
+ /* FPI ==> PCI IO address mapping */
+ /* base: 0xbAE00000 == > 0xbAE00000 */
+ /* size: 2M */
+ writel(0xbae00000, PCI_CR_FCI_ADDR_MAP11hg);
+
+ /* PCI ==> FPI address mapping */
+ /* base: 0x0 ==> 0x0 */
+ /* size: 32M */
+ /* BAR1 32M map to SDR address */
+ writel(0x0e000008, PCI_CR_BAR11MASK);
+ writel(0, PCI_CR_PCI_ADDR_MAP11);
+ writel(0, PCI_CS_BASE_ADDR1);
+#ifdef CONFIG_DANUBE_PCI_HW_SWAP
+ /* both TX and RX endian swap are enabled */
+ DANUBE_PCI_REG32 (PCI_CR_PCI_EOI_REG) |= 3;
+ wmb ();
+#endif
+ /* TODO: disable BAR2 & BAR3 - why was this in the original Infineon code? */
+ writel(readl(PCI_CR_BAR12MASK) | 0x80000000, PCI_CR_BAR12MASK);
+ writel(readl(PCI_CR_BAR13MASK) | 0x80000000, PCI_CR_BAR13MASK);
+ /* use 8 dw burst length */
+ writel(0x303, PCI_CR_FCI_BURST_LENGTH);
+
+ writel(readl(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD);
+ wmb();
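+ /* pulse PCI_RST (GPIO P1.5) low for about 1 ms to reset the bus */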
+ writel(readl(DANUBE_GPIO_P1_OUT) & ~(1 << 5), DANUBE_GPIO_P1_OUT);
+ wmb();
+ mdelay (1);
+ writel(readl(DANUBE_GPIO_P1_OUT) | (1 << 5), DANUBE_GPIO_P1_OUT);
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin){
+ printk("\n\n\n%s:%s[%d] %d %d\n", __FILE__, __func__, __LINE__, slot, pin);
+ switch (slot) {
+ case 13:
+ /* IDSEL = AD29 --> USB Host Controller */
+ return (INT_NUM_IM1_IRL0 + 17);
+ case 14:
+ /* IDSEL = AD30 --> mini PCI connector */
+ //return (INT_NUM_IM1_IRL0 + 14);
+ return (INT_NUM_IM0_IRL0 + 22);
+ default:
+ printk("Warning: no IRQ found for PCI device in slot %d, pin %d\n", slot, pin);
+ return 0;
+ }
+}
+
+int pcibios_init(void){
+ extern int pci_probe_only;
+
+ pci_probe_only = 0;
+ printk ("PCI: Probing PCI hardware on host bus 0.\n");
+
+ danube_pci_startup ();
+
+ // DANUBE_PCI_REG32(PCI_CR_CLK_CTRL_REG) &= (~8);
+ danube_pci_mapped_cfg = (u32)ioremap_nocache(0x17000000, 0x800 * 16);
+ printk("Danube PCI mapped to 0x%08lX\n", (unsigned long)danube_pci_mapped_cfg);
+
+ danube_pci_controller.io_map_base = (unsigned long)ioremap(DANUBE_PCI_IO_BASE, DANUBE_PCI_IO_SIZE - 1);
+
+ printk("Danube PCI I/O mapped to 0x%08X\n", (unsigned long)danube_pci_controller.io_map_base);
+
+ register_pci_controller(&danube_pci_controller);
+
+ return 0;
+}
+
+arch_initcall(pcibios_init);
diff --git a/target/linux/ifxmips/files/arch/mips/danube/pmu.c b/target/linux/ifxmips/files/arch/mips/danube/pmu.c
new file mode 100644
index 000000000..5bb66dbe5
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/pmu.c
@@ -0,0 +1,45 @@
+/*
+ * arch/mips/danube/pmu.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <asm/danube/danube.h>
+
+void
+danube_pmu_enable (unsigned int module)
+{
+ int err = 1000000;
+
+ writel(readl(DANUBE_PMU_PWDCR) & ~module, DANUBE_PMU_PWDCR);
+ while (--err && (readl(DANUBE_PMU_PWDSR) & module)) {}
+
+ if (!err)
+ panic("activating PMU module failed!");
+}
+EXPORT_SYMBOL(danube_pmu_enable);
+
+void
+danube_pmu_disable (unsigned int module)
+{
+ writel(readl(DANUBE_PMU_PWDCR) | module, DANUBE_PMU_PWDCR);
+}
+EXPORT_SYMBOL(danube_pmu_disable);
diff --git a/target/linux/ifxmips/files/arch/mips/danube/prom.c b/target/linux/ifxmips/files/arch/mips/danube/prom.c
new file mode 100644
index 000000000..efb06120e
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/prom.c
@@ -0,0 +1,81 @@
+/*
+ * arch/mips/danube/prom.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2005 Wu Qi Ming infineon
+ *
+ * Rewrite of Infineon Danube code, thanks to infineon for the support,
+ * software and hardware
+ *
+ * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <asm/bootinfo.h>
+#include <asm/danube/danube.h>
+
+static char buf[1024];
+
+void
+prom_free_prom_memory (void)
+{
+}
+
+const char *
+get_system_type (void)
+{
+ return BOARD_SYSTEM_TYPE;
+}
+
+void
+prom_putchar (char c)
+{
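+ /* busy-wait until the ASC1 TX FIFO is empty, then emit the character (LF becomes CRLF) */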
+ while ((readl(DANUBE_ASC1_FSTAT) & ASCFSTAT_TXFFLMASK) >> ASCFSTAT_TXFFLOFF);
+
+ if (c == '\n')
+ writel('\r', DANUBE_ASC1_TBUF);
+ writel(c, DANUBE_ASC1_TBUF);
+}
+
+void
+prom_printf (const char * fmt, ...)
+{
+ va_list args;
+ int l;
+ char *p, *buf_end;
+
+ va_start(args, fmt);
+ l = vsnprintf(buf, sizeof(buf), fmt, args);
+ if (l > (int) sizeof(buf) - 1)
+ l = (int) sizeof(buf) - 1;
+ va_end(args);
+ buf_end = buf + l;
+
+ for (p = buf; p < buf_end; p++)
+ {
+ prom_putchar(*p);
+ }
+}
+
+void __init
+prom_init(void)
+{
+ mips_machgroup = MACH_GROUP_DANUBE;
+ mips_machtype = MACH_INFINEON_DANUBE;
+
+ strcpy(&(arcs_cmdline[0]), "console=ttyS0,115200 rootfstype=squashfs,jffs2 init=/etc/preinit");
+ add_memory_region (0x00000000, 0x2000000, BOOT_MEM_RAM);
+}
diff --git a/target/linux/ifxmips/files/arch/mips/danube/reset.c b/target/linux/ifxmips/files/arch/mips/danube/reset.c
new file mode 100644
index 000000000..cb1793ca7
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/reset.c
@@ -0,0 +1,66 @@
+/*
+ * arch/mips/danube/reset.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2005 infineon
+ *
+ * Rewrite of Infineon Danube code, thanks to infineon for the support,
+ * software and hardware
+ *
+ * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/danube/danube.h>
+
+static void
+danube_machine_restart (char *command)
+{
+ printk (KERN_NOTICE "System restart\n");
+ local_irq_disable ();
+
+ writel(readl(DANUBE_RCU_REQ) | DANUBE_RST_ALL, DANUBE_RCU_REQ);
+ for (;;);
+}
+
+static void
+danube_machine_halt (void)
+{
+ printk (KERN_NOTICE "System halted.\n");
+ local_irq_disable ();
+ for (;;);
+}
+
+static void
+danube_machine_power_off (void)
+{
+ printk (KERN_NOTICE "Please turn off the power now.\n");
+ local_irq_disable ();
+ for (;;);
+}
+
+void
+danube_reboot_setup (void)
+{
+ _machine_restart = danube_machine_restart;
+ _machine_halt = danube_machine_halt;
+ pm_power_off = danube_machine_power_off;
+}
diff --git a/target/linux/ifxmips/files/arch/mips/danube/setup.c b/target/linux/ifxmips/files/arch/mips/danube/setup.c
new file mode 100644
index 000000000..60b0ce28e
--- /dev/null
+++ b/target/linux/ifxmips/files/arch/mips/danube/setup.c
@@ -0,0 +1,177 @@
+/*
+ * arch/mips/danube/setup.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2004 peng.liu@infineon.com
+ *
+ * Rewrite of Infineon Danube code, thanks to infineon for the support,
+ * software and hardware
+ *
+ * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/init.h>
+
+#include <asm/time.h>
+#include <asm/traps.h>
+#include <asm/cpu.h>
+#include <asm/irq.h>
+#include <asm/danube/danube.h>
+#include <asm/danube/danube_irq.h>
+#include <asm/danube/danube_pmu.h>
+
+static unsigned int r4k_offset; /* Amount to increment compare reg each time */
+static unsigned int r4k_cur; /* What counter should be at next timer irq */
+
+extern void danube_reboot_setup (void);
+void prom_printf (const char * fmt, ...);
+
+void
+__init bus_error_init (void)
+{
+ /* nothing yet */
+}
+
+unsigned int
+danube_get_ddr_hz (void)
+{
+ switch (readl(DANUBE_CGU_SYS) & 0x3)
+ {
+ case 0:
+ return CLOCK_167M;
+ case 1:
+ return CLOCK_133M;
+ case 2:
+ return CLOCK_111M;
+ }
+ return CLOCK_83M;
+}
+EXPORT_SYMBOL(danube_get_ddr_hz);
+
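+/* the CPU clock is selected by CGU_SYS bits [3:2]: 333 MHz, the DDR clock, or twice the DDR clock */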
+unsigned int
+danube_get_cpu_hz (void)
+{
+ unsigned int ddr_clock = danube_get_ddr_hz();
+ switch (readl(DANUBE_CGU_SYS) & 0xc)
+ {
+ case 0:
+ return CLOCK_333M;
+ case 4:
+ return ddr_clock;
+ }
+ return ddr_clock << 1;
+}
+EXPORT_SYMBOL(danube_get_cpu_hz);
+
+unsigned int
+danube_get_fpi_hz (void)
+{
+ unsigned int ddr_clock = danube_get_ddr_hz();
+ if (readl(DANUBE_CGU_SYS) & 0x40)
+ {
+ return ddr_clock >> 1;
+ }
+ return ddr_clock;
+}
+EXPORT_SYMBOL(danube_get_fpi_hz);
+
+unsigned int
+danube_get_cpu_ver (void)
+{
+ return readl(DANUBE_MCD_CHIPID) & 0xFFFFF000;
+}
+EXPORT_SYMBOL(danube_get_cpu_ver);
+
+void
+danube_time_init (void)
+{
+ mips_hpt_frequency = danube_get_cpu_hz() / 2;
+ r4k_offset = mips_hpt_frequency / HZ;
+ printk("mips_hpt_frequency:%d\n", mips_hpt_frequency);
+ printk("r4k_offset: %08x(%d)\n", r4k_offset, r4k_offset);
+}
+
+int
+danube_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ /*TODO*/
+ printk(KERN_ERR "TODO: BUS error\n");
+
+ return MIPS_BE_FATAL;
+}
+
+/* ISR GPTU Timer 6 for high resolution timer */
+static irqreturn_t
+danube_timer6_interrupt(int irq, void *dev_id)
+{
+ timer_interrupt(DANUBE_TIMER6_INT, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction hrt_irqaction = {
+ .handler = danube_timer6_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "hrt",
+};
+
+void __init
+plat_timer_setup (struct irqaction *irq)
+{
+ unsigned int retval;
+
+ setup_irq(MIPS_CPU_TIMER_IRQ, irq);
+
+ r4k_cur = (read_c0_count() + r4k_offset);
+ write_c0_compare(r4k_cur);
+
+ danube_pmu_enable(DANUBE_PMU_PWDCR_GPT | DANUBE_PMU_PWDCR_FPI);
+
+ writel(0x100, DANUBE_GPTU_GPT_CLC);
+
+ writel(0xffff, DANUBE_GPTU_GPT_CAPREL);
+ writel(0x80C0, DANUBE_GPTU_GPT_T6CON);
+
+ retval = setup_irq(DANUBE_TIMER6_INT, &hrt_irqaction);
+
+ if (retval)
+ {
+ prom_printf("reqeust_irq failed %d. HIGH_RES_TIMER is diabled\n", DANUBE_TIMER6_INT);
+ }
+}
+
+void __init
+plat_mem_setup (void)
+{
+ u32 status;
+ prom_printf("This %s has a cpu rev of 0x%X\n", BOARD_SYSTEM_TYPE, danube_get_cpu_ver());
+
+ //TODO WHY ???
+ /* clear RE bit*/
+ status = read_c0_status();
+ status &= (~(1<<25));
+ write_c0_status(status);
+
+ danube_reboot_setup();
+ board_time_init = danube_time_init;
+ board_be_handler = &danube_be_handler;
+
+ ioport_resource.start = IOPORT_RESOURCE_START;
+ ioport_resource.end = IOPORT_RESOURCE_END;
+ iomem_resource.start = IOMEM_RESOURCE_START;
+ iomem_resource.end = IOMEM_RESOURCE_END;
+}