--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -435,7 +435,6 @@ config ARCH_IXP4XX
select CPU_XSCALE
select GENERIC_GPIO
select GENERIC_CLOCKEVENTS
- select DMABOUNCE if PCI
help
Support for Intel's IXP4XX (XScale) family of processors.
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -199,6 +199,43 @@ config IXP4XX_INDIRECT_PCI
need to use the indirect method instead. If you don't know
what you need, leave this option unselected.
+config IXP4XX_LEGACY_DMABOUNCE
+ bool "Legacy PCI DMA bounce support"
+ depends on PCI
+ default n
+ select DMABOUNCE
+ help
+ The IXP4xx is limited to a 64MB window for PCI DMA, which
+ requires that PCI accesses >= 64MB are bounced via buffers
+ below 64MB.
+
+ The kernel has traditionally handled this issue by using
+ ARM-specific DMA bounce support code for all accesses >= 64MB.
+ That code causes problems of its own, so it is desirable to
+ disable it.
+
+ Enabling this option makes IXP4xx continue to use the problematic
+ ARM DMA bounce code. Disabling this option makes IXP4xx use the
+ kernel's generic bounce code.
+
+ Say 'N'.
+
+config IXP4XX_ZONE_DMA
+ bool "Support > 64MB RAM"
+ depends on !IXP4XX_LEGACY_DMABOUNCE
+ default y
+ select ZONE_DMA
+ help
+ The IXP4xx is limited to a 64MB window for PCI DMA, which
+ requires that PCI accesses above 64MB are bounced via buffers
+ below 64MB.
+
+ Disabling this option allows you to omit the support code for
+ DMA-able memory allocations and DMA bouncing, but the kernel
+ will then not work properly if more than 64MB of RAM is present.
+
+ Say 'Y' unless your platform is limited to <= 64MB of RAM.
+
config IXP4XX_QMGR
tristate "IXP4xx Queue Manager support"
help
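
To make the help text above concrete: the IXP4xx PCI window only covers the first 64MB of bus address space, so a DMA buffer is safe exactly when its last byte stays below that boundary; anything else has to be bounced through low memory, either by DMABOUNCE or, with IXP4XX_ZONE_DMA, via GFP_DMA allocations. A minimal userspace sketch of the boundary test (illustrative only, not part of the patch; the pci_bus_type check used by the real hook is omitted):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define SZ_64M (64u * 1024 * 1024)

/* A transfer must be bounced if any byte of it falls outside the first
 * 64MB reachable by IXP4xx PCI masters.  dma_addr + size is one past
 * the last byte, so "> SZ_64M" leaves a buffer ending exactly at the
 * boundary alone, whereas ">=", as used before this patch, would still
 * bounce it. */
static bool needs_bounce(uint32_t dma_addr, size_t size)
{
    return (uint64_t)dma_addr + size > SZ_64M;
}

int main(void)
{
    printf("%d\n", needs_bounce(SZ_64M - 4096, 4096)); /* ends at 64MB:  0 */
    printf("%d\n", needs_bounce(SZ_64M - 4096, 8192)); /* crosses 64MB:  1 */
    printf("%d\n", needs_bounce(SZ_64M, 16));          /* above 64MB:    1 */
    return 0;
}
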
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -321,27 +321,33 @@ static int abort_handler(unsigned long a
*/
static int ixp4xx_pci_platform_notify(struct device *dev)
{
- if(dev->bus == &pci_bus_type) {
- *dev->dma_mask = SZ_64M - 1;
+ if (dev->bus == &pci_bus_type) {
+ *dev->dma_mask = SZ_64M - 1;
dev->coherent_dma_mask = SZ_64M - 1;
+#ifdef CONFIG_DMABOUNCE
dmabounce_register_dev(dev, 2048, 4096);
+#endif
}
return 0;
}
static int ixp4xx_pci_platform_notify_remove(struct device *dev)
{
- if(dev->bus == &pci_bus_type) {
+#ifdef CONFIG_DMABOUNCE
+ if (dev->bus == &pci_bus_type)
dmabounce_unregister_dev(dev);
- }
+#endif
return 0;
}
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+#ifdef CONFIG_DMABOUNCE
+int dma_needs_bounce_2(struct device *dev, dma_addr_t dma_addr, size_t size)
{
- return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
+ return (dev->bus == &pci_bus_type ) && ((dma_addr + size) > SZ_64M);
}
+#endif
+#ifdef CONFIG_ZONE_DMA
/*
* Only first 64MB of memory can be accessed via PCI.
* We use GFP_DMA to allocate safe buffers to do map/unmap.
@@ -364,6 +370,7 @@ void __init ixp4xx_adjust_zones(unsigned
zhole_size[1] = zhole_size[0];
zhole_size[0] = 0;
}
+#endif
void __init ixp4xx_pci_preinit(void)
{
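
The ixp4xx_adjust_zones() hunk above only shows the tail of the function, but the idea matches the comment: only the first 64MB of RAM is reachable from PCI, so everything above it is moved out of the DMA zone and GFP_DMA allocations stay below the boundary. A rough userspace sketch of that split, with the early-return and page accounting assumed for illustration (they are not shown in the hunk):

#include <stdio.h>

#define PAGE_SHIFT 12
#define SZ_64M (64ul * 1024 * 1024)

/* Sketch of the zone split: memory that fits in the 64MB PCI window
 * stays in zone 0 (ZONE_DMA), the remainder moves to zone 1, and the
 * hole bookkeeping follows it, as in the hunk tail above. */
static void adjust_zones_sketch(unsigned long *zone_size, unsigned long *zhole_size)
{
    unsigned long dma_pages = SZ_64M >> PAGE_SHIFT;

    if (zone_size[0] <= dma_pages)      /* 64MB or less: nothing to do */
        return;

    zone_size[1] = zone_size[0] - dma_pages;
    zone_size[0] = dma_pages;
    zhole_size[1] = zhole_size[0];
    zhole_size[0] = 0;
}

int main(void)
{
    unsigned long size[2]  = { (256ul * 1024 * 1024) >> PAGE_SHIFT, 0 };
    unsigned long holes[2] = { 0, 0 };

    adjust_zones_sketch(size, holes);
    printf("ZONE_DMA: %lu pages, ZONE_NORMAL: %lu pages\n", size[0], size[1]);
    return 0;
}
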
--- a/arch/arm/mach-ixp4xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
@@ -16,10 +16,12 @@
#if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
+#ifdef CONFIG_ZONE_DMA
void ixp4xx_adjust_zones(unsigned long *size, unsigned long *holes);
#define arch_adjust_zones(size, holes) \
ixp4xx_adjust_zones(size, holes)
+#endif
#define ISA_DMA_THRESHOLD (SZ_64M - 1)
#define MAX_DMA_ADDRESS (PAGE_OFFSET + SZ_64M)
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -30,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
+#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
@@ -248,7 +249,7 @@ static inline dma_addr_t map_single(stru
needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
}
- if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
+ if (device_info && (needs_bounce || dma_needs_bounce_2(dev, dma_addr, size))) {
struct safe_buffer *buf;
buf = alloc_safe_buffer(device_info, ptr, size, dir);
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
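
For completeness, the dma_mask half of the bounce decision in map_single() (the needs_bounce line kept in the hunk above) works like this: the bus addresses of the first and last byte are OR'd together, and any bit that survives masking with the device's dma_mask means part of the buffer is unreachable. A standalone illustration with made-up values (userspace, not kernel code; in the real function this result is OR'd with the platform hook, dma_needs_bounce_2() in this patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define SZ_64M (64u * 1024 * 1024)

/* Mask test as in map_single(): a leftover bit after masking the first
 * and last byte addresses with dma_mask forces a bounce. */
static bool outside_mask(uint32_t dma_addr, size_t size, uint32_t mask)
{
    return ((dma_addr | (uint32_t)(dma_addr + size - 1)) & ~mask) != 0;
}

int main(void)
{
    uint32_t mask = SZ_64M - 1;  /* dma_mask set in ixp4xx_pci_platform_notify() */

    /* A 4KB buffer ending exactly at 64MB still fits under the mask ... */
    printf("%d\n", outside_mask(SZ_64M - 4096, 4096, mask));   /* 0 */
    /* ... while an 8KB buffer starting at the same address spills past it. */
    printf("%d\n", outside_mask(SZ_64M - 4096, 8192, mask));   /* 1 */
    return 0;
}
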