author    hauke <hauke@3c298f89-4303-0410-b956-a3cf2f4a3e73>  2010-07-19 20:25:20 +0000
committer hauke <hauke@3c298f89-4303-0410-b956-a3cf2f4a3e73>  2010-07-19 20:25:20 +0000
commit    9fe2a2d5546fe9370eb9c977d0816dc32e31d461 (patch)
tree      d5497947876a46c2d56f2f702552f5f0b942ce0f /target/linux/generic
parent    03685da10dba687075810931c495f38abac9ba8c (diff)
brcm47xx: prepare brcm47xx patches for sending to mainline.
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@22296 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux/generic')
-rw-r--r--  target/linux/generic/patches-2.6.34/975-ssb_update.patch       716
-rw-r--r--  target/linux/generic/patches-2.6.34/976-ssb_add_dma_dev.patch   52
-rw-r--r--  target/linux/generic/patches-2.6.35/975-ssb_update.patch       708
-rw-r--r--  target/linux/generic/patches-2.6.35/976-ssb_add_dma_dev.patch   52
4 files changed, 1415 insertions, 113 deletions
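
The two 975-ssb_update.patch files below carry the same change for 2.6.34 and 2.6.35: the b44 driver and the ssb core drop the bus-specific ssb_dma_* wrappers in favour of the generic DMA API, addressed through the new dma_dev pointer in struct ssb_device. A minimal before/after sketch of the calling pattern (hypothetical driver code, not taken from this commit; sdev, buf and len are assumed):

	/* old style, removed by these patches */
	mapping = ssb_dma_map_single(sdev, buf, len, DMA_TO_DEVICE);
	if (ssb_dma_mapping_error(sdev, mapping))
		goto err;
	ssb_dma_unmap_single(sdev, mapping, len, DMA_TO_DEVICE);

	/* new style, generic DMA API via sdev->dma_dev */
	mapping = dma_map_single(sdev->dma_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(sdev->dma_dev, mapping))
		goto err;
	dma_unmap_single(sdev->dma_dev, mapping, len, DMA_TO_DEVICE);
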
diff --git a/target/linux/generic/patches-2.6.34/975-ssb_update.patch b/target/linux/generic/patches-2.6.34/975-ssb_update.patch
index 245cec18a..6525a1030 100644
--- a/target/linux/generic/patches-2.6.34/975-ssb_update.patch
+++ b/target/linux/generic/patches-2.6.34/975-ssb_update.patch
@@ -1,15 +1,380 @@
+--- a/drivers/net/b44.c
++++ b/drivers/net/b44.c
+@@ -135,7 +135,6 @@ static void b44_init_rings(struct b44 *)
+
+ static void b44_init_hw(struct b44 *, int);
+
+-static int dma_desc_align_mask;
+ static int dma_desc_sync_size;
+ static int instance;
+
+@@ -150,9 +149,8 @@ static inline void b44_sync_dma_desc_for
+ unsigned long offset,
+ enum dma_data_direction dir)
+ {
+- ssb_dma_sync_single_range_for_device(sdev, dma_base,
+- offset & dma_desc_align_mask,
+- dma_desc_sync_size, dir);
++ dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
++ dma_desc_sync_size, dir);
+ }
+
+ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
+@@ -160,9 +158,8 @@ static inline void b44_sync_dma_desc_for
+ unsigned long offset,
+ enum dma_data_direction dir)
+ {
+- ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
+- offset & dma_desc_align_mask,
+- dma_desc_sync_size, dir);
++ dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
++ dma_desc_sync_size, dir);
+ }
+
+ static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
+@@ -608,10 +605,10 @@ static void b44_tx(struct b44 *bp)
+
+ BUG_ON(skb == NULL);
+
+- ssb_dma_unmap_single(bp->sdev,
+- rp->mapping,
+- skb->len,
+- DMA_TO_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev,
++ rp->mapping,
++ skb->len,
++ DMA_TO_DEVICE);
+ rp->skb = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+@@ -648,29 +645,29 @@ static int b44_alloc_rx_skb(struct b44 *
+ if (skb == NULL)
+ return -ENOMEM;
+
+- mapping = ssb_dma_map_single(bp->sdev, skb->data,
+- RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
++ mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
++ RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
+
+ /* Hardware bug work-around, the chip is unable to do PCI DMA
+ to/from anything above 1GB :-( */
+- if (ssb_dma_mapping_error(bp->sdev, mapping) ||
++ if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+ /* Sigh... */
+- if (!ssb_dma_mapping_error(bp->sdev, mapping))
+- ssb_dma_unmap_single(bp->sdev, mapping,
++ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
++ dma_unmap_single(bp->sdev->dma_dev, mapping,
+ RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ if (skb == NULL)
+ return -ENOMEM;
+- mapping = ssb_dma_map_single(bp->sdev, skb->data,
+- RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
+- if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+- mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+- if (!ssb_dma_mapping_error(bp->sdev, mapping))
+- ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
++ mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
++ RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
++ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
++ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
++ dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+@@ -745,9 +742,9 @@ static void b44_recycle_rx(struct b44 *b
+ dest_idx * sizeof(*dest_desc),
+ DMA_BIDIRECTIONAL);
+
+- ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
+- RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
++ dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
++ RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
+ }
+
+ static int b44_rx(struct b44 *bp, int budget)
+@@ -767,9 +764,9 @@ static int b44_rx(struct b44 *bp, int bu
+ struct rx_header *rh;
+ u16 len;
+
+- ssb_dma_sync_single_for_cpu(bp->sdev, map,
+- RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
++ dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
++ RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
+ rh = (struct rx_header *) skb->data;
+ len = le16_to_cpu(rh->len);
+ if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
+@@ -801,8 +798,8 @@ static int b44_rx(struct b44 *bp, int bu
+ skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
+ if (skb_size < 0)
+ goto drop_it;
+- ssb_dma_unmap_single(bp->sdev, map,
+- skb_size, DMA_FROM_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev, map,
++ skb_size, DMA_FROM_DEVICE);
+ /* Leave out rx_header */
+ skb_put(skb, len + RX_PKT_OFFSET);
+ skb_pull(skb, RX_PKT_OFFSET);
+@@ -954,24 +951,24 @@ static netdev_tx_t b44_start_xmit(struct
+ goto err_out;
+ }
+
+- mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
+- if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
++ mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
++ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+ struct sk_buff *bounce_skb;
+
+ /* Chip can't handle DMA to/from >1GB, use bounce buffer */
+- if (!ssb_dma_mapping_error(bp->sdev, mapping))
+- ssb_dma_unmap_single(bp->sdev, mapping, len,
++ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
++ dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+ DMA_TO_DEVICE);
+
+ bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+ if (!bounce_skb)
+ goto err_out;
+
+- mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
+- len, DMA_TO_DEVICE);
+- if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+- if (!ssb_dma_mapping_error(bp->sdev, mapping))
+- ssb_dma_unmap_single(bp->sdev, mapping,
++ mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
++ len, DMA_TO_DEVICE);
++ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
++ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
++ dma_unmap_single(bp->sdev->dma_dev, mapping,
+ len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(bounce_skb);
+ goto err_out;
+@@ -1014,8 +1011,6 @@ static netdev_tx_t b44_start_xmit(struct
+ if (TX_BUFFS_AVAIL(bp) < 1)
+ netif_stop_queue(dev);
+
+- dev->trans_start = jiffies;
+-
+ out_unlock:
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+@@ -1070,8 +1065,8 @@ static void b44_free_rings(struct b44 *b
+
+ if (rp->skb == NULL)
+ continue;
+- ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rp->skb);
+ rp->skb = NULL;
+ }
+@@ -1082,8 +1077,8 @@ static void b44_free_rings(struct b44 *b
+
+ if (rp->skb == NULL)
+ continue;
+- ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
+- DMA_TO_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
++ DMA_TO_DEVICE);
+ dev_kfree_skb_any(rp->skb);
+ rp->skb = NULL;
+ }
+@@ -1105,14 +1100,12 @@ static void b44_init_rings(struct b44 *b
+ memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+- ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
+- DMA_TABLE_BYTES,
+- DMA_BIDIRECTIONAL);
++ dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
++ DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+- ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
+- DMA_TABLE_BYTES,
+- DMA_TO_DEVICE);
++ dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
++ DMA_TABLE_BYTES, DMA_TO_DEVICE);
+
+ for (i = 0; i < bp->rx_pending; i++) {
+ if (b44_alloc_rx_skb(bp, -1, i) < 0)
+@@ -1132,27 +1125,23 @@ static void b44_free_consistent(struct b
+ bp->tx_buffers = NULL;
+ if (bp->rx_ring) {
+ if (bp->flags & B44_FLAG_RX_RING_HACK) {
+- ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
+- DMA_TABLE_BYTES,
+- DMA_BIDIRECTIONAL);
++ dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
++ DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+ kfree(bp->rx_ring);
+ } else
+- ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
+- bp->rx_ring, bp->rx_ring_dma,
+- GFP_KERNEL);
++ dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
++ bp->rx_ring, bp->rx_ring_dma);
+ bp->rx_ring = NULL;
+ bp->flags &= ~B44_FLAG_RX_RING_HACK;
+ }
+ if (bp->tx_ring) {
+ if (bp->flags & B44_FLAG_TX_RING_HACK) {
+- ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
+- DMA_TABLE_BYTES,
+- DMA_TO_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
++ DMA_TABLE_BYTES, DMA_TO_DEVICE);
+ kfree(bp->tx_ring);
+ } else
+- ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
+- bp->tx_ring, bp->tx_ring_dma,
+- GFP_KERNEL);
++ dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
++ bp->tx_ring, bp->tx_ring_dma);
+ bp->tx_ring = NULL;
+ bp->flags &= ~B44_FLAG_TX_RING_HACK;
+ }
+@@ -1177,7 +1166,8 @@ static int b44_alloc_consistent(struct b
+ goto out_err;
+
+ size = DMA_TABLE_BYTES;
+- bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
++ bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
++ &bp->rx_ring_dma, gfp);
+ if (!bp->rx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+@@ -1189,11 +1179,11 @@ static int b44_alloc_consistent(struct b
+ if (!rx_ring)
+ goto out_err;
+
+- rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
+- DMA_TABLE_BYTES,
+- DMA_BIDIRECTIONAL);
++ rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
++ DMA_TABLE_BYTES,
++ DMA_BIDIRECTIONAL);
+
+- if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
++ if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
+ rx_ring_dma + size > DMA_BIT_MASK(30)) {
+ kfree(rx_ring);
+ goto out_err;
+@@ -1204,7 +1194,8 @@ static int b44_alloc_consistent(struct b
+ bp->flags |= B44_FLAG_RX_RING_HACK;
+ }
+
+- bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
++ bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
++ &bp->tx_ring_dma, gfp);
+ if (!bp->tx_ring) {
+ /* Allocation may have failed due to ssb_dma_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+@@ -1216,11 +1207,11 @@ static int b44_alloc_consistent(struct b
+ if (!tx_ring)
+ goto out_err;
+
+- tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
+- DMA_TABLE_BYTES,
+- DMA_TO_DEVICE);
++ tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
++ DMA_TABLE_BYTES,
++ DMA_TO_DEVICE);
+
+- if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
++ if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
+ tx_ring_dma + size > DMA_BIT_MASK(30)) {
+ kfree(tx_ring);
+ goto out_err;
+@@ -2178,12 +2169,14 @@ static int __devinit b44_init_one(struct
+ "Failed to powerup the bus\n");
+ goto err_out_free_dev;
+ }
+- err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
+- if (err) {
++
++ if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
++ dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ dev_err(sdev->dev,
+ "Required 30BIT DMA mask unsupported by the system\n");
+ goto err_out_powerdown;
+ }
++
+ err = b44_get_invariants(bp);
+ if (err) {
+ dev_err(sdev->dev,
+@@ -2346,7 +2339,6 @@ static int __init b44_init(void)
+ int err;
+
+ /* Setup paramaters for syncing RX/TX DMA descriptors */
+- dma_desc_align_mask = ~(dma_desc_align_size - 1);
+ dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
+
+ err = b44_pci_init();
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
-@@ -233,6 +233,8 @@ void ssb_chipcommon_init(struct ssb_chip
+@@ -209,6 +209,24 @@ static void chipco_powercontrol_init(str
+ }
+ }
+
++/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
++static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
++{
++ struct ssb_bus *bus = cc->dev->bus;
++
++ switch (bus->chip_id) {
++ case 0x4312:
++ case 0x4322:
++ case 0x4328:
++ return 7000;
++ case 0x4325:
++ /* TODO: */
++ default:
++ return 15000;
++ }
++}
++
++/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
+ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
+ {
+ struct ssb_bus *bus = cc->dev->bus;
+@@ -218,6 +236,12 @@ static void calc_fast_powerup_delay(stru
+
+ if (bus->bustype != SSB_BUSTYPE_PCI)
+ return;
++
++ if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
++ cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc);
++ return;
++ }
++
+ if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
+ return;
+
+@@ -233,6 +257,9 @@ void ssb_chipcommon_init(struct ssb_chip
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
+ if (cc->dev->id.revision >= 11)
+ cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
++ ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
ssb_pmu_init(cc);
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
-@@ -370,6 +372,7 @@ u32 ssb_chipco_gpio_control(struct ssb_c
+@@ -370,6 +397,7 @@ u32 ssb_chipco_gpio_control(struct ssb_c
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
}
@@ -17,9 +382,66 @@
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
+--- a/drivers/ssb/driver_chipcommon_pmu.c
++++ b/drivers/ssb/driver_chipcommon_pmu.c
+@@ -502,9 +502,9 @@ static void ssb_pmu_resources_init(struc
+ chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
+ }
+
++/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
+ void ssb_pmu_init(struct ssb_chipcommon *cc)
+ {
+- struct ssb_bus *bus = cc->dev->bus;
+ u32 pmucap;
+
+ if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
+@@ -516,15 +516,12 @@ void ssb_pmu_init(struct ssb_chipcommon
+ ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
+ cc->pmu.rev, pmucap);
+
+- if (cc->pmu.rev >= 1) {
+- if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
+- chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
+- ~SSB_CHIPCO_PMU_CTL_NOILPONW);
+- } else {
+- chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
+- SSB_CHIPCO_PMU_CTL_NOILPONW);
+- }
+- }
++ if (cc->pmu.rev == 1)
++ chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
++ ~SSB_CHIPCO_PMU_CTL_NOILPONW);
++ else
++ chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
++ SSB_CHIPCO_PMU_CTL_NOILPONW);
+ ssb_pmu_pll_init(cc);
+ ssb_pmu_resources_init(cc);
+ }
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
-@@ -834,6 +834,9 @@ int ssb_bus_pcibus_register(struct ssb_b
+@@ -486,11 +486,12 @@ static int ssb_devices_register(struct s
+ #ifdef CONFIG_SSB_PCIHOST
+ sdev->irq = bus->host_pci->irq;
+ dev->parent = &bus->host_pci->dev;
++ sdev->dma_dev = dev->parent;
+ #endif
+ break;
+ case SSB_BUSTYPE_PCMCIA:
+ #ifdef CONFIG_SSB_PCMCIAHOST
+- sdev->irq = bus->host_pcmcia->irq.AssignedIRQ;
++ sdev->irq = bus->host_pcmcia->irq;
+ dev->parent = &bus->host_pcmcia->dev;
+ #endif
+ break;
+@@ -501,6 +502,7 @@ static int ssb_devices_register(struct s
+ break;
+ case SSB_BUSTYPE_SSB:
+ dev->dma_mask = &dev->coherent_dma_mask;
++ sdev->dma_dev = dev;
+ break;
+ }
+
+@@ -834,6 +836,9 @@ int ssb_bus_pcibus_register(struct ssb_b
if (!err) {
ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
"PCI device %s\n", dev_name(&host_pci->dev));
@@ -29,6 +451,87 @@
}
return err;
+@@ -1223,80 +1228,6 @@ u32 ssb_dma_translation(struct ssb_devic
+ }
+ EXPORT_SYMBOL(ssb_dma_translation);
+
+-int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
+-{
+-#ifdef CONFIG_SSB_PCIHOST
+- int err;
+-#endif
+-
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- err = pci_set_dma_mask(dev->bus->host_pci, mask);
+- if (err)
+- return err;
+- err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
+- return err;
+-#endif
+- case SSB_BUSTYPE_SSB:
+- return dma_set_mask(dev->dev, mask);
+- default:
+- __ssb_dma_not_implemented(dev);
+- }
+- return -ENOSYS;
+-}
+-EXPORT_SYMBOL(ssb_dma_set_mask);
+-
+-void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp_flags)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- if (gfp_flags & GFP_DMA) {
+- /* Workaround: The PCI API does not support passing
+- * a GFP flag. */
+- return dma_alloc_coherent(&dev->bus->host_pci->dev,
+- size, dma_handle, gfp_flags);
+- }
+- return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
+-#endif
+- case SSB_BUSTYPE_SSB:
+- return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
+- default:
+- __ssb_dma_not_implemented(dev);
+- }
+- return NULL;
+-}
+-EXPORT_SYMBOL(ssb_dma_alloc_consistent);
+-
+-void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle,
+- gfp_t gfp_flags)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- if (gfp_flags & GFP_DMA) {
+- /* Workaround: The PCI API does not support passing
+- * a GFP flag. */
+- dma_free_coherent(&dev->bus->host_pci->dev,
+- size, vaddr, dma_handle);
+- return;
+- }
+- pci_free_consistent(dev->bus->host_pci, size,
+- vaddr, dma_handle);
+- return;
+-#endif
+- case SSB_BUSTYPE_SSB:
+- dma_free_coherent(dev->dev, size, vaddr, dma_handle);
+- return;
+- default:
+- __ssb_dma_not_implemented(dev);
+- }
+-}
+-EXPORT_SYMBOL(ssb_dma_free_consistent);
+-
+ int ssb_bus_may_powerdown(struct ssb_bus *bus)
+ {
+ struct ssb_chipcommon *cc;
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -168,7 +168,7 @@ err_pci:
@@ -40,7 +543,7 @@
/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
#define SPEX16(_outvar, _offset, _mask, _shift) \
out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
-@@ -254,7 +254,7 @@ static int sprom_do_read(struct ssb_bus
+@@ -254,7 +254,7 @@ static int sprom_do_read(struct ssb_bus
int i;
for (i = 0; i < bus->sprom_size; i++)
@@ -58,7 +561,7 @@
mmiowb();
msleep(20);
}
-@@ -621,6 +621,14 @@ static int ssb_pci_sprom_get(struct ssb_
+@@ -621,6 +621,28 @@ static int ssb_pci_sprom_get(struct ssb_
int err = -ENOMEM;
u16 *buf;
@@ -66,16 +569,30 @@
+ ssb_printk(KERN_ERR PFX "No SPROM available!\n");
+ return -ENODEV;
+ }
-+
-+ bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
-+ SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
++ if (bus->chipco.dev) { /* can be unavailible! */
++ /*
++ * get SPROM offset: SSB_SPROM_BASE1 except for
++ * chipcommon rev >= 31 or chip ID is 0x4312 and
++ * chipcommon status & 3 == 2
++ */
++ if (bus->chipco.dev->id.revision >= 31)
++ bus->sprom_offset = SSB_SPROM_BASE31;
++ else if (bus->chip_id == 0x4312 &&
++ (bus->chipco.status & 0x03) == 2)
++ bus->sprom_offset = SSB_SPROM_BASE31;
++ else
++ bus->sprom_offset = SSB_SPROM_BASE1;
++ } else {
++ bus->sprom_offset = SSB_SPROM_BASE1;
++ }
++ ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
+
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
goto out;
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
-@@ -176,3 +176,17 @@ const struct ssb_sprom *ssb_get_fallback
+@@ -176,3 +176,18 @@ const struct ssb_sprom *ssb_get_fallback
{
return fallback_sprom;
}
@@ -88,6 +605,7 @@
+ /* this routine differs from specs as we do not access SPROM directly
+ on PCMCIA */
+ if (bus->bustype == SSB_BUSTYPE_PCI &&
++ bus->chipco.dev && /* can be unavailible! */
+ bus->chipco.dev->id.revision >= 31)
+ return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
+
@@ -95,6 +613,15 @@
+}
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
+@@ -167,7 +167,7 @@ struct ssb_device {
+ * is an optimization. */
+ const struct ssb_bus_ops *ops;
+
+- struct device *dev;
++ struct device *dev, *dma_dev;
+
+ struct ssb_bus *bus;
+ struct ssb_device_id id;
@@ -305,6 +305,7 @@ struct ssb_bus {
/* ID information about the Chip. */
u16 chip_id;
@@ -113,6 +640,177 @@
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom);
+@@ -466,14 +470,6 @@ extern u32 ssb_dma_translation(struct ss
+ #define SSB_DMA_TRANSLATION_MASK 0xC0000000
+ #define SSB_DMA_TRANSLATION_SHIFT 30
+
+-extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask);
+-
+-extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp_flags);
+-extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle,
+- gfp_t gfp_flags);
+-
+ static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
+ {
+ #ifdef CONFIG_SSB_DEBUG
+@@ -482,155 +478,6 @@ static inline void __cold __ssb_dma_not_
+ #endif /* DEBUG */
+ }
+
+-static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- return pci_dma_mapping_error(dev->bus->host_pci, addr);
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- return dma_mapping_error(dev->dev, addr);
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+- return -ENOSYS;
+-}
+-
+-static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
+- size_t size, enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- return pci_map_single(dev->bus->host_pci, p, size, dir);
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- return dma_map_single(dev->dev, p, size, dir);
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+- return 0;
+-}
+-
+-static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr,
+- size_t size, enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_unmap_single(dev->dev, dma_addr, size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
+- dma_addr_t dma_addr,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
+- size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
+- dma_addr_t dma_addr,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
+- size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
+- dma_addr_t dma_addr,
+- unsigned long offset,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- /* Just sync everything. That's all the PCI API can do. */
+- pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
+- offset + size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
+- size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
+- dma_addr_t dma_addr,
+- unsigned long offset,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- /* Just sync everything. That's all the PCI API can do. */
+- pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
+- offset + size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
+- size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-
+ #ifdef CONFIG_SSB_PCIHOST
+ /* PCI-host wrapper driver */
+ extern int ssb_pcihost_register(struct pci_driver *driver);
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -53,6 +53,7 @@
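
The ssb_pci_sprom_get() hunk above also introduces the SPROM offset selection. Condensed into a standalone sketch (hypothetical helper, using only identifiers that appear in the hunk):

	/* SSB_SPROM_BASE1 unless ChipCommon rev >= 31, or chip 0x4312
	 * with (chipcommon status & 3) == 2; ChipCommon may be absent. */
	static u16 pick_sprom_offset(struct ssb_bus *bus)
	{
		if (!bus->chipco.dev)
			return SSB_SPROM_BASE1;
		if (bus->chipco.dev->id.revision >= 31)
			return SSB_SPROM_BASE31;
		if (bus->chip_id == 0x4312 && (bus->chipco.status & 0x03) == 2)
			return SSB_SPROM_BASE31;
		return SSB_SPROM_BASE1;
	}
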
diff --git a/target/linux/generic/patches-2.6.34/976-ssb_add_dma_dev.patch b/target/linux/generic/patches-2.6.34/976-ssb_add_dma_dev.patch
deleted file mode 100644
index fd463b4a7..000000000
--- a/target/linux/generic/patches-2.6.34/976-ssb_add_dma_dev.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
-
-Add dma_dev, a pointer to struct device, to struct ssb_device. We pass it
-to the generic DMA API with SSB_BUSTYPE_PCI and SSB_BUSTYPE_SSB.
-ssb_devices_register() sets up it properly.
-
-This is preparation for replacing the ssb bus specific DMA API (ssb_dma_*)
-with the generic DMA API.
-
-Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
-Acked-by: Michael Buesch <mb@bu3sch.de>
-Cc: Gary Zambrano <zambrano@broadcom.com>
-Cc: Stefano Brivio <stefano.brivio@polimi.it>
-Cc: Larry Finger <Larry.Finger@lwfinger.net>
-Cc: John W. Linville <linville@tuxdriver.com>
-Acked-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
----
-
- drivers/ssb/main.c | 2 ++
- include/linux/ssb/ssb.h | 2 +-
- 2 files changed, 3 insertions(+), 1 deletion(-)
-
---- a/drivers/ssb/main.c
-+++ b/drivers/ssb/main.c
-@@ -486,6 +486,7 @@ static int ssb_devices_register(struct s
- #ifdef CONFIG_SSB_PCIHOST
- sdev->irq = bus->host_pci->irq;
- dev->parent = &bus->host_pci->dev;
-+ sdev->dma_dev = dev->parent;
- #endif
- break;
- case SSB_BUSTYPE_PCMCIA:
-@@ -501,6 +502,7 @@ static int ssb_devices_register(struct s
- break;
- case SSB_BUSTYPE_SSB:
- dev->dma_mask = &dev->coherent_dma_mask;
-+ sdev->dma_dev = dev;
- break;
- }
-
---- a/include/linux/ssb/ssb.h
-+++ b/include/linux/ssb/ssb.h
-@@ -167,7 +167,7 @@ struct ssb_device {
- * is an optimization. */
- const struct ssb_bus_ops *ops;
-
-- struct device *dev;
-+ struct device *dev, *dma_dev;
-
- struct ssb_bus *bus;
- struct ssb_device_id id;
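
The 976-ssb_add_dma_dev.patch deleted above (its content is now folded into the updated 975-ssb_update.patch) is what introduces the dma_dev pointer used throughout. Its per-bus setup in ssb_devices_register(), condensed into a sketch with the same variable names as the hunk (dev is the ssb device's own struct device):

	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		/* DMA is performed through the PCI host device */
		sdev->dma_dev = &bus->host_pci->dev;
		break;
	case SSB_BUSTYPE_SSB:
		/* native SSB: the device itself is the DMA device */
		sdev->dma_dev = dev;
		break;
	}
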
diff --git a/target/linux/generic/patches-2.6.35/975-ssb_update.patch b/target/linux/generic/patches-2.6.35/975-ssb_update.patch
new file mode 100644
index 000000000..0def7628a
--- /dev/null
+++ b/target/linux/generic/patches-2.6.35/975-ssb_update.patch
@@ -0,0 +1,708 @@
+--- a/drivers/net/b44.c
++++ b/drivers/net/b44.c
+@@ -135,7 +135,6 @@ static void b44_init_rings(struct b44 *)
+
+ static void b44_init_hw(struct b44 *, int);
+
+-static int dma_desc_align_mask;
+ static int dma_desc_sync_size;
+ static int instance;
+
+@@ -150,9 +149,8 @@ static inline void b44_sync_dma_desc_for
+ unsigned long offset,
+ enum dma_data_direction dir)
+ {
+- ssb_dma_sync_single_range_for_device(sdev, dma_base,
+- offset & dma_desc_align_mask,
+- dma_desc_sync_size, dir);
++ dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
++ dma_desc_sync_size, dir);
+ }
+
+ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
+@@ -160,9 +158,8 @@ static inline void b44_sync_dma_desc_for
+ unsigned long offset,
+ enum dma_data_direction dir)
+ {
+- ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
+- offset & dma_desc_align_mask,
+- dma_desc_sync_size, dir);
++ dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
++ dma_desc_sync_size, dir);
+ }
+
+ static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
+@@ -608,10 +605,10 @@ static void b44_tx(struct b44 *bp)
+
+ BUG_ON(skb == NULL);
+
+- ssb_dma_unmap_single(bp->sdev,
+- rp->mapping,
+- skb->len,
+- DMA_TO_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev,
++ rp->mapping,
++ skb->len,
++ DMA_TO_DEVICE);
+ rp->skb = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+@@ -648,29 +645,29 @@ static int b44_alloc_rx_skb(struct b44 *
+ if (skb == NULL)
+ return -ENOMEM;
+
+- mapping = ssb_dma_map_single(bp->sdev, skb->data,
+- RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
++ mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
++ RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
+
+ /* Hardware bug work-around, the chip is unable to do PCI DMA
+ to/from anything above 1GB :-( */
+- if (ssb_dma_mapping_error(bp->sdev, mapping) ||
++ if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+ /* Sigh... */
+- if (!ssb_dma_mapping_error(bp->sdev, mapping))
+- ssb_dma_unmap_single(bp->sdev, mapping,
++ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
++ dma_unmap_single(bp->sdev->dma_dev, mapping,
+ RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ if (skb == NULL)
+ return -ENOMEM;
+- mapping = ssb_dma_map_single(bp->sdev, skb->data,
+- RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
+- if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+- mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+- if (!ssb_dma_mapping_error(bp->sdev, mapping))
+- ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
++ mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
++ RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
++ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
++ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
++ dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+@@ -745,9 +742,9 @@ static void b44_recycle_rx(struct b44 *b
+ dest_idx * sizeof(*dest_desc),
+ DMA_BIDIRECTIONAL);
+
+- ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
+- RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
++ dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
++ RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
+ }
+
+ static int b44_rx(struct b44 *bp, int budget)
+@@ -767,9 +764,9 @@ static int b44_rx(struct b44 *bp, int bu
+ struct rx_header *rh;
+ u16 len;
+
+- ssb_dma_sync_single_for_cpu(bp->sdev, map,
+- RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
++ dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
++ RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
+ rh = (struct rx_header *) skb->data;
+ len = le16_to_cpu(rh->len);
+ if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
+@@ -801,8 +798,8 @@ static int b44_rx(struct b44 *bp, int bu
+ skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
+ if (skb_size < 0)
+ goto drop_it;
+- ssb_dma_unmap_single(bp->sdev, map,
+- skb_size, DMA_FROM_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev, map,
++ skb_size, DMA_FROM_DEVICE);
+ /* Leave out rx_header */
+ skb_put(skb, len + RX_PKT_OFFSET);
+ skb_pull(skb, RX_PKT_OFFSET);
+@@ -954,24 +951,24 @@ static netdev_tx_t b44_start_xmit(struct
+ goto err_out;
+ }
+
+- mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
+- if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
++ mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
++ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+ struct sk_buff *bounce_skb;
+
+ /* Chip can't handle DMA to/from >1GB, use bounce buffer */
+- if (!ssb_dma_mapping_error(bp->sdev, mapping))
+- ssb_dma_unmap_single(bp->sdev, mapping, len,
++ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
++ dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+ DMA_TO_DEVICE);
+
+ bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+ if (!bounce_skb)
+ goto err_out;
+
+- mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
+- len, DMA_TO_DEVICE);
+- if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+- if (!ssb_dma_mapping_error(bp->sdev, mapping))
+- ssb_dma_unmap_single(bp->sdev, mapping,
++ mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
++ len, DMA_TO_DEVICE);
++ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
++ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
++ dma_unmap_single(bp->sdev->dma_dev, mapping,
+ len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(bounce_skb);
+ goto err_out;
+@@ -1068,8 +1065,8 @@ static void b44_free_rings(struct b44 *b
+
+ if (rp->skb == NULL)
+ continue;
+- ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
+- DMA_FROM_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rp->skb);
+ rp->skb = NULL;
+ }
+@@ -1080,8 +1077,8 @@ static void b44_free_rings(struct b44 *b
+
+ if (rp->skb == NULL)
+ continue;
+- ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
+- DMA_TO_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
++ DMA_TO_DEVICE);
+ dev_kfree_skb_any(rp->skb);
+ rp->skb = NULL;
+ }
+@@ -1103,14 +1100,12 @@ static void b44_init_rings(struct b44 *b
+ memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+- ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
+- DMA_TABLE_BYTES,
+- DMA_BIDIRECTIONAL);
++ dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
++ DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+- ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
+- DMA_TABLE_BYTES,
+- DMA_TO_DEVICE);
++ dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
++ DMA_TABLE_BYTES, DMA_TO_DEVICE);
+
+ for (i = 0; i < bp->rx_pending; i++) {
+ if (b44_alloc_rx_skb(bp, -1, i) < 0)
+@@ -1130,27 +1125,23 @@ static void b44_free_consistent(struct b
+ bp->tx_buffers = NULL;
+ if (bp->rx_ring) {
+ if (bp->flags & B44_FLAG_RX_RING_HACK) {
+- ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
+- DMA_TABLE_BYTES,
+- DMA_BIDIRECTIONAL);
++ dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
++ DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+ kfree(bp->rx_ring);
+ } else
+- ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
+- bp->rx_ring, bp->rx_ring_dma,
+- GFP_KERNEL);
++ dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
++ bp->rx_ring, bp->rx_ring_dma);
+ bp->rx_ring = NULL;
+ bp->flags &= ~B44_FLAG_RX_RING_HACK;
+ }
+ if (bp->tx_ring) {
+ if (bp->flags & B44_FLAG_TX_RING_HACK) {
+- ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
+- DMA_TABLE_BYTES,
+- DMA_TO_DEVICE);
++ dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
++ DMA_TABLE_BYTES, DMA_TO_DEVICE);
+ kfree(bp->tx_ring);
+ } else
+- ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
+- bp->tx_ring, bp->tx_ring_dma,
+- GFP_KERNEL);
++ dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
++ bp->tx_ring, bp->tx_ring_dma);
+ bp->tx_ring = NULL;
+ bp->flags &= ~B44_FLAG_TX_RING_HACK;
+ }
+@@ -1175,7 +1166,8 @@ static int b44_alloc_consistent(struct b
+ goto out_err;
+
+ size = DMA_TABLE_BYTES;
+- bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
++ bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
++ &bp->rx_ring_dma, gfp);
+ if (!bp->rx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+@@ -1187,11 +1179,11 @@ static int b44_alloc_consistent(struct b
+ if (!rx_ring)
+ goto out_err;
+
+- rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
+- DMA_TABLE_BYTES,
+- DMA_BIDIRECTIONAL);
++ rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
++ DMA_TABLE_BYTES,
++ DMA_BIDIRECTIONAL);
+
+- if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
++ if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
+ rx_ring_dma + size > DMA_BIT_MASK(30)) {
+ kfree(rx_ring);
+ goto out_err;
+@@ -1202,7 +1194,8 @@ static int b44_alloc_consistent(struct b
+ bp->flags |= B44_FLAG_RX_RING_HACK;
+ }
+
+- bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
++ bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
++ &bp->tx_ring_dma, gfp);
+ if (!bp->tx_ring) {
+ /* Allocation may have failed due to ssb_dma_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+@@ -1214,11 +1207,11 @@ static int b44_alloc_consistent(struct b
+ if (!tx_ring)
+ goto out_err;
+
+- tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
+- DMA_TABLE_BYTES,
+- DMA_TO_DEVICE);
++ tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
++ DMA_TABLE_BYTES,
++ DMA_TO_DEVICE);
+
+- if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
++ if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
+ tx_ring_dma + size > DMA_BIT_MASK(30)) {
+ kfree(tx_ring);
+ goto out_err;
+@@ -2176,12 +2169,14 @@ static int __devinit b44_init_one(struct
+ "Failed to powerup the bus\n");
+ goto err_out_free_dev;
+ }
+- err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
+- if (err) {
++
++ if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
++ dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ dev_err(sdev->dev,
+ "Required 30BIT DMA mask unsupported by the system\n");
+ goto err_out_powerdown;
+ }
++
+ err = b44_get_invariants(bp);
+ if (err) {
+ dev_err(sdev->dev,
+@@ -2344,7 +2339,6 @@ static int __init b44_init(void)
+ int err;
+
+ /* Setup paramaters for syncing RX/TX DMA descriptors */
+- dma_desc_align_mask = ~(dma_desc_align_size - 1);
+ dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
+
+ err = b44_pci_init();
+--- a/drivers/ssb/driver_chipcommon.c
++++ b/drivers/ssb/driver_chipcommon.c
+@@ -209,6 +209,24 @@ static void chipco_powercontrol_init(str
+ }
+ }
+
++/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
++static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
++{
++ struct ssb_bus *bus = cc->dev->bus;
++
++ switch (bus->chip_id) {
++ case 0x4312:
++ case 0x4322:
++ case 0x4328:
++ return 7000;
++ case 0x4325:
++ /* TODO: */
++ default:
++ return 15000;
++ }
++}
++
++/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
+ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
+ {
+ struct ssb_bus *bus = cc->dev->bus;
+@@ -218,6 +236,12 @@ static void calc_fast_powerup_delay(stru
+
+ if (bus->bustype != SSB_BUSTYPE_PCI)
+ return;
++
++ if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
++ cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc);
++ return;
++ }
++
+ if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
+ return;
+
+@@ -235,6 +259,7 @@ void ssb_chipcommon_init(struct ssb_chip
+ return; /* We don't have a ChipCommon */
+ if (cc->dev->id.revision >= 11)
+ cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
++ ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
+ ssb_pmu_init(cc);
+ chipco_powercontrol_init(cc);
+ ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
+--- a/drivers/ssb/driver_chipcommon_pmu.c
++++ b/drivers/ssb/driver_chipcommon_pmu.c
+@@ -502,9 +502,9 @@ static void ssb_pmu_resources_init(struc
+ chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
+ }
+
++/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
+ void ssb_pmu_init(struct ssb_chipcommon *cc)
+ {
+- struct ssb_bus *bus = cc->dev->bus;
+ u32 pmucap;
+
+ if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
+@@ -516,15 +516,12 @@ void ssb_pmu_init(struct ssb_chipcommon
+ ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
+ cc->pmu.rev, pmucap);
+
+- if (cc->pmu.rev >= 1) {
+- if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
+- chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
+- ~SSB_CHIPCO_PMU_CTL_NOILPONW);
+- } else {
+- chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
+- SSB_CHIPCO_PMU_CTL_NOILPONW);
+- }
+- }
++ if (cc->pmu.rev == 1)
++ chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
++ ~SSB_CHIPCO_PMU_CTL_NOILPONW);
++ else
++ chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
++ SSB_CHIPCO_PMU_CTL_NOILPONW);
+ ssb_pmu_pll_init(cc);
+ ssb_pmu_resources_init(cc);
+ }
+--- a/drivers/ssb/main.c
++++ b/drivers/ssb/main.c
+@@ -486,6 +486,7 @@ static int ssb_devices_register(struct s
+ #ifdef CONFIG_SSB_PCIHOST
+ sdev->irq = bus->host_pci->irq;
+ dev->parent = &bus->host_pci->dev;
++ sdev->dma_dev = dev->parent;
+ #endif
+ break;
+ case SSB_BUSTYPE_PCMCIA:
+@@ -501,6 +502,7 @@ static int ssb_devices_register(struct s
+ break;
+ case SSB_BUSTYPE_SSB:
+ dev->dma_mask = &dev->coherent_dma_mask;
++ sdev->dma_dev = dev;
+ break;
+ }
+
+@@ -1226,80 +1228,6 @@ u32 ssb_dma_translation(struct ssb_devic
+ }
+ EXPORT_SYMBOL(ssb_dma_translation);
+
+-int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
+-{
+-#ifdef CONFIG_SSB_PCIHOST
+- int err;
+-#endif
+-
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- err = pci_set_dma_mask(dev->bus->host_pci, mask);
+- if (err)
+- return err;
+- err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
+- return err;
+-#endif
+- case SSB_BUSTYPE_SSB:
+- return dma_set_mask(dev->dev, mask);
+- default:
+- __ssb_dma_not_implemented(dev);
+- }
+- return -ENOSYS;
+-}
+-EXPORT_SYMBOL(ssb_dma_set_mask);
+-
+-void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp_flags)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- if (gfp_flags & GFP_DMA) {
+- /* Workaround: The PCI API does not support passing
+- * a GFP flag. */
+- return dma_alloc_coherent(&dev->bus->host_pci->dev,
+- size, dma_handle, gfp_flags);
+- }
+- return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
+-#endif
+- case SSB_BUSTYPE_SSB:
+- return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
+- default:
+- __ssb_dma_not_implemented(dev);
+- }
+- return NULL;
+-}
+-EXPORT_SYMBOL(ssb_dma_alloc_consistent);
+-
+-void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle,
+- gfp_t gfp_flags)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- if (gfp_flags & GFP_DMA) {
+- /* Workaround: The PCI API does not support passing
+- * a GFP flag. */
+- dma_free_coherent(&dev->bus->host_pci->dev,
+- size, vaddr, dma_handle);
+- return;
+- }
+- pci_free_consistent(dev->bus->host_pci, size,
+- vaddr, dma_handle);
+- return;
+-#endif
+- case SSB_BUSTYPE_SSB:
+- dma_free_coherent(dev->dev, size, vaddr, dma_handle);
+- return;
+- default:
+- __ssb_dma_not_implemented(dev);
+- }
+-}
+-EXPORT_SYMBOL(ssb_dma_free_consistent);
+-
+ int ssb_bus_may_powerdown(struct ssb_bus *bus)
+ {
+ struct ssb_chipcommon *cc;
+--- a/drivers/ssb/pci.c
++++ b/drivers/ssb/pci.c
+@@ -626,11 +626,22 @@ static int ssb_pci_sprom_get(struct ssb_
+ return -ENODEV;
+ }
+ if (bus->chipco.dev) { /* can be unavailible! */
+- bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
+- SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
++ /*
++ * get SPROM offset: SSB_SPROM_BASE1 except for
++ * chipcommon rev >= 31 or chip ID is 0x4312 and
++ * chipcommon status & 3 == 2
++ */
++ if (bus->chipco.dev->id.revision >= 31)
++ bus->sprom_offset = SSB_SPROM_BASE31;
++ else if (bus->chip_id == 0x4312 &&
++ (bus->chipco.status & 0x03) == 2)
++ bus->sprom_offset = SSB_SPROM_BASE31;
++ else
++ bus->sprom_offset = SSB_SPROM_BASE1;
+ } else {
+ bus->sprom_offset = SSB_SPROM_BASE1;
+ }
++ ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
+
+ buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
+ if (!buf)
+--- a/include/linux/ssb/ssb.h
++++ b/include/linux/ssb/ssb.h
+@@ -167,7 +167,7 @@ struct ssb_device {
+ * is an optimization. */
+ const struct ssb_bus_ops *ops;
+
+- struct device *dev;
++ struct device *dev, *dma_dev;
+
+ struct ssb_bus *bus;
+ struct ssb_device_id id;
+@@ -470,14 +470,6 @@ extern u32 ssb_dma_translation(struct ss
+ #define SSB_DMA_TRANSLATION_MASK 0xC0000000
+ #define SSB_DMA_TRANSLATION_SHIFT 30
+
+-extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask);
+-
+-extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp_flags);
+-extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle,
+- gfp_t gfp_flags);
+-
+ static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
+ {
+ #ifdef CONFIG_SSB_DEBUG
+@@ -486,155 +478,6 @@ static inline void __cold __ssb_dma_not_
+ #endif /* DEBUG */
+ }
+
+-static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- return pci_dma_mapping_error(dev->bus->host_pci, addr);
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- return dma_mapping_error(dev->dev, addr);
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+- return -ENOSYS;
+-}
+-
+-static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
+- size_t size, enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- return pci_map_single(dev->bus->host_pci, p, size, dir);
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- return dma_map_single(dev->dev, p, size, dir);
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+- return 0;
+-}
+-
+-static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr,
+- size_t size, enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_unmap_single(dev->dev, dma_addr, size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
+- dma_addr_t dma_addr,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
+- size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
+- dma_addr_t dma_addr,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
+- size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
+- dma_addr_t dma_addr,
+- unsigned long offset,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- /* Just sync everything. That's all the PCI API can do. */
+- pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
+- offset + size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
+- size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
+- dma_addr_t dma_addr,
+- unsigned long offset,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dev->bus->bustype) {
+- case SSB_BUSTYPE_PCI:
+-#ifdef CONFIG_SSB_PCIHOST
+- /* Just sync everything. That's all the PCI API can do. */
+- pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
+- offset + size, dir);
+- return;
+-#endif
+- break;
+- case SSB_BUSTYPE_SSB:
+- dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
+- size, dir);
+- return;
+- default:
+- break;
+- }
+- __ssb_dma_not_implemented(dev);
+-}
+-
+-
+ #ifdef CONFIG_SSB_PCIHOST
+ /* PCI-host wrapper driver */
+ extern int ssb_pcihost_register(struct pci_driver *driver);
diff --git a/target/linux/generic/patches-2.6.35/976-ssb_add_dma_dev.patch b/target/linux/generic/patches-2.6.35/976-ssb_add_dma_dev.patch
deleted file mode 100644
index fd463b4a7..000000000
--- a/target/linux/generic/patches-2.6.35/976-ssb_add_dma_dev.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
-
-Add dma_dev, a pointer to struct device, to struct ssb_device. We pass it
-to the generic DMA API with SSB_BUSTYPE_PCI and SSB_BUSTYPE_SSB.
-ssb_devices_register() sets up it properly.
-
-This is preparation for replacing the ssb bus specific DMA API (ssb_dma_*)
-with the generic DMA API.
-
-Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
-Acked-by: Michael Buesch <mb@bu3sch.de>
-Cc: Gary Zambrano <zambrano@broadcom.com>
-Cc: Stefano Brivio <stefano.brivio@polimi.it>
-Cc: Larry Finger <Larry.Finger@lwfinger.net>
-Cc: John W. Linville <linville@tuxdriver.com>
-Acked-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
----
-
- drivers/ssb/main.c | 2 ++
- include/linux/ssb/ssb.h | 2 +-
- 2 files changed, 3 insertions(+), 1 deletion(-)
-
---- a/drivers/ssb/main.c
-+++ b/drivers/ssb/main.c
-@@ -486,6 +486,7 @@ static int ssb_devices_register(struct s
- #ifdef CONFIG_SSB_PCIHOST
- sdev->irq = bus->host_pci->irq;
- dev->parent = &bus->host_pci->dev;
-+ sdev->dma_dev = dev->parent;
- #endif
- break;
- case SSB_BUSTYPE_PCMCIA:
-@@ -501,6 +502,7 @@ static int ssb_devices_register(struct s
- break;
- case SSB_BUSTYPE_SSB:
- dev->dma_mask = &dev->coherent_dma_mask;
-+ sdev->dma_dev = dev;
- break;
- }
-
---- a/include/linux/ssb/ssb.h
-+++ b/include/linux/ssb/ssb.h
-@@ -167,7 +167,7 @@ struct ssb_device {
- * is an optimization. */
- const struct ssb_bus_ops *ops;
-
-- struct device *dev;
-+ struct device *dev, *dma_dev;
-
- struct ssb_bus *bus;
- struct ssb_device_id id;